[19/46] Make vect_dr_stmt return a stmt_vec_info
[official-gcc.git] / gcc / tree-vect-loop.c
1 /* Loop Vectorization
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com> and
4 Ira Rosen <irar@il.ibm.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "cfghooks.h"
31 #include "tree-pass.h"
32 #include "ssa.h"
33 #include "optabs-tree.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "cfganal.h"
38 #include "gimplify.h"
39 #include "gimple-iterator.h"
40 #include "gimplify-me.h"
41 #include "tree-ssa-loop-ivopts.h"
42 #include "tree-ssa-loop-manip.h"
43 #include "tree-ssa-loop-niter.h"
44 #include "tree-ssa-loop.h"
45 #include "cfgloop.h"
46 #include "params.h"
47 #include "tree-scalar-evolution.h"
48 #include "tree-vectorizer.h"
49 #include "gimple-fold.h"
50 #include "cgraph.h"
51 #include "tree-cfg.h"
52 #include "tree-if-conv.h"
53 #include "internal-fn.h"
54 #include "tree-vector-builder.h"
55 #include "vec-perm-indices.h"
56 #include "tree-eh.h"
58 /* Loop Vectorization Pass.
60 This pass tries to vectorize loops.
62 For example, the vectorizer transforms the following simple loop:
64 short a[N]; short b[N]; short c[N]; int i;
66 for (i=0; i<N; i++){
67 a[i] = b[i] + c[i];
70 as if it was manually vectorized by rewriting the source code into:
72 typedef int __attribute__((mode(V8HI))) v8hi;
73 short a[N]; short b[N]; short c[N]; int i;
74 v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
75 v8hi va, vb, vc;
77 for (i=0; i<N/8; i++){
78 vb = pb[i];
79 vc = pc[i];
80 va = vb + vc;
81 pa[i] = va;
84 The main entry to this pass is vectorize_loops(), in which
85 the vectorizer applies a set of analyses on a given set of loops,
86 followed by the actual vectorization transformation for the loops that
87 had successfully passed the analysis phase.
88 Throughout this pass we make a distinction between two types of
89 data: scalars (which are represented by SSA_NAMES), and memory references
90 ("data-refs"). These two types of data require different handling both
91 during analysis and transformation. The types of data-refs that the
92 vectorizer currently supports are ARRAY_REFS whose base is an array DECL
93 (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
94 accesses are required to have a simple (consecutive) access pattern.
96 Analysis phase:
97 ===============
98 The driver for the analysis phase is vect_analyze_loop().
99 It applies a set of analyses, some of which rely on the scalar evolution
100 analyzer (scev) developed by Sebastian Pop.
102 During the analysis phase the vectorizer records some information
103 per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
104 loop, as well as general information about the loop as a whole, which is
105 recorded in a "loop_vec_info" struct attached to each loop.
107 Transformation phase:
108 =====================
109 The loop transformation phase scans all the stmts in the loop, and
110 creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
111 the loop that needs to be vectorized. It inserts the vector code sequence
112 just before the scalar stmt S, and records a pointer to the vector code
113 in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
114 attached to S). This pointer will be used for the vectorization of following
115 stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
116 otherwise, we rely on dead code elimination for removing it.
118 For example, say stmt S1 was vectorized into stmt VS1:
120 VS1: vb = px[i];
121 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
122 S2: a = b;
124 To vectorize stmt S2, the vectorizer first finds the stmt that defines
125 the operand 'b' (S1), and gets the relevant vector def 'vb' from the
126 vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
127 resulting sequence would be:
129 VS1: vb = px[i];
130 S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
131 VS2: va = vb;
132 S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
134 Operands that are not SSA_NAMEs are data-refs that appear in
135 load/store operations (like 'x[i]' in S1), and are handled differently.
137 Target modeling:
138 =================
139 Currently the only target specific information that is used is the
140 size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
141 Targets that can support different sizes of vectors, for now will need
142 to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
143 flexibility will be added in the future.
145 Since we only vectorize operations whose vector form can be
146 expressed using existing tree codes, to verify that an operation is
147 supported, the vectorizer checks the relevant optab at the relevant
148 machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
149 the value found is CODE_FOR_nothing, then there's no target support, and
150 we can't vectorize the stmt.
152 For additional information on this project see:
153 http://gcc.gnu.org/projects/tree-ssa/vectorization.html
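/* For instance (an editor's sketch based only on the paragraph above,
   reusing the call it quotes): deciding whether the V8HI addition in
   the example loop is supported comes down to a check of the form

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       {
         /* No target support - the stmt cannot be vectorized.  */
       }

   and a target that does provide the pattern returns a real insn code
   instead.  */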
156 static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
158 /* Subroutine of vect_determine_vf_for_stmt that handles only one
159 statement. VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
160 may already be set for general statements (not just data refs). */
162 static bool
163 vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
164 bool vectype_maybe_set_p,
165 poly_uint64 *vf,
166 vec<stmt_vec_info > *mask_producers)
168 gimple *stmt = stmt_info->stmt;
170 if ((!STMT_VINFO_RELEVANT_P (stmt_info)
171 && !STMT_VINFO_LIVE_P (stmt_info))
172 || gimple_clobber_p (stmt))
174 if (dump_enabled_p ())
175 dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
176 return true;
179 tree stmt_vectype, nunits_vectype;
180 if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
181 &nunits_vectype))
182 return false;
184 if (stmt_vectype)
186 if (STMT_VINFO_VECTYPE (stmt_info))
187 /* The only case when a vectype had been already set is for stmts
188 that contain a data ref, or for "pattern-stmts" (stmts generated
189 by the vectorizer to represent/replace a certain idiom). */
190 gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
191 || vectype_maybe_set_p)
192 && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
193 else if (stmt_vectype == boolean_type_node)
194 mask_producers->safe_push (stmt_info);
195 else
196 STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
199 if (nunits_vectype)
200 vect_update_max_nunits (vf, nunits_vectype);
202 return true;
205 /* Subroutine of vect_determine_vectorization_factor. Set the vector
206 types of STMT_INFO and all attached pattern statements and update
207 the vectorization factor VF accordingly. If some of the statements
208 produce a mask result whose vector type can only be calculated later,
209 add them to MASK_PRODUCERS. Return true on success or false if
210 something prevented vectorization. */
212 static bool
213 vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
214 vec<stmt_vec_info > *mask_producers)
216 vec_info *vinfo = stmt_info->vinfo;
217 if (dump_enabled_p ())
219 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
220 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
222 if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
223 return false;
225 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
226 && STMT_VINFO_RELATED_STMT (stmt_info))
228 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
229 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
231 /* If a pattern statement has def stmts, analyze them too. */
232 for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
233 !gsi_end_p (si); gsi_next (&si))
235 stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
236 if (dump_enabled_p ())
238 dump_printf_loc (MSG_NOTE, vect_location,
239 "==> examining pattern def stmt: ");
240 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
241 def_stmt_info->stmt, 0);
243 if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
244 vf, mask_producers))
245 return false;
248 if (dump_enabled_p ())
250 dump_printf_loc (MSG_NOTE, vect_location,
251 "==> examining pattern statement: ");
252 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
254 if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
255 return false;
258 return true;
261 /* Function vect_determine_vectorization_factor
263 Determine the vectorization factor (VF). VF is the number of data elements
264 that are operated upon in parallel in a single iteration of the vectorized
265 loop. For example, when vectorizing a loop that operates on 4-byte elements,
266 on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
267 elements can fit in a single vector register.
269 We currently support vectorization of loops in which all types operated upon
270 are of the same size. Therefore this function currently sets VF according to
271 the size of the types operated upon, and fails if there are multiple sizes
272 in the loop.
274 VF is also the factor by which the loop iterations are strip-mined, e.g.:
275 original loop:
276 for (i=0; i<N; i++){
277 a[i] = b[i] + c[i];
280 vectorized loop:
281 for (i=0; i<N; i+=VF){
282 a[i:VF] = b[i:VF] + c[i:VF];
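/* A concrete picture of the strip-mining (editor's sketch; assumes
   VF == 4 and that N is a multiple of VF, so no scalar epilogue is
   shown) - one iteration of the vectorized loop covers four
   iterations of the original loop:

     for (i = 0; i < N; i += 4)
       {
         a[i + 0] = b[i + 0] + c[i + 0];
         a[i + 1] = b[i + 1] + c[i + 1];
         a[i + 2] = b[i + 2] + c[i + 2];
         a[i + 3] = b[i + 3] + c[i + 3];
       }

   where the four element-wise additions are issued as a single vector
   add.  */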
286 static bool
287 vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
289 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
290 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
291 unsigned nbbs = loop->num_nodes;
292 poly_uint64 vectorization_factor = 1;
293 tree scalar_type = NULL_TREE;
294 gphi *phi;
295 tree vectype;
296 stmt_vec_info stmt_info;
297 unsigned i;
298 auto_vec<stmt_vec_info> mask_producers;
300 DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
302 for (i = 0; i < nbbs; i++)
304 basic_block bb = bbs[i];
306 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
307 gsi_next (&si))
309 phi = si.phi ();
310 stmt_info = loop_vinfo->lookup_stmt (phi);
311 if (dump_enabled_p ())
313 dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
314 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
317 gcc_assert (stmt_info);
319 if (STMT_VINFO_RELEVANT_P (stmt_info)
320 || STMT_VINFO_LIVE_P (stmt_info))
322 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
323 scalar_type = TREE_TYPE (PHI_RESULT (phi));
325 if (dump_enabled_p ())
327 dump_printf_loc (MSG_NOTE, vect_location,
328 "get vectype for scalar type: ");
329 dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
330 dump_printf (MSG_NOTE, "\n");
333 vectype = get_vectype_for_scalar_type (scalar_type);
334 if (!vectype)
336 if (dump_enabled_p ())
338 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
339 "not vectorized: unsupported "
340 "data-type ");
341 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
342 scalar_type);
343 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
345 return false;
347 STMT_VINFO_VECTYPE (stmt_info) = vectype;
349 if (dump_enabled_p ())
351 dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
352 dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
353 dump_printf (MSG_NOTE, "\n");
356 if (dump_enabled_p ())
358 dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
359 dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
360 dump_printf (MSG_NOTE, "\n");
363 vect_update_max_nunits (&vectorization_factor, vectype);
367 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
368 gsi_next (&si))
370 stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
371 if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
372 &mask_producers))
373 return false;
377 /* TODO: Analyze cost. Decide if worth while to vectorize. */
378 if (dump_enabled_p ())
380 dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
381 dump_dec (MSG_NOTE, vectorization_factor);
382 dump_printf (MSG_NOTE, "\n");
385 if (known_le (vectorization_factor, 1U))
387 if (dump_enabled_p ())
388 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
389 "not vectorized: unsupported data-type\n");
390 return false;
392 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
394 for (i = 0; i < mask_producers.length (); i++)
396 stmt_info = mask_producers[i];
397 tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
398 if (!mask_type)
399 return false;
400 STMT_VINFO_VECTYPE (stmt_info) = mask_type;
403 return true;
407 /* Function vect_is_simple_iv_evolution.
409 FORNOW: A simple evolution of an induction variable in the loop is
410 considered a polynomial evolution. */
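/* For example (editor's note, in scev's chrec notation): the access
   function of the canonical IV of "for (i = 0; i < n; i++)" in loop 1
   is {0, +, 1}_1, so *INIT becomes 0 and *STEP becomes 1.  An
   evolution that is itself a chrec (a polynomial of degree >= 2) is
   rejected below.  */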
412 static bool
413 vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
414 tree * step)
416 tree init_expr;
417 tree step_expr;
418 tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
419 basic_block bb;
421 /* When there is no evolution in this loop, the evolution function
422 is not "simple". */
423 if (evolution_part == NULL_TREE)
424 return false;
426 /* When the evolution is a polynomial of degree >= 2
427 the evolution function is not "simple". */
428 if (tree_is_chrec (evolution_part))
429 return false;
431 step_expr = evolution_part;
432 init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
434 if (dump_enabled_p ())
436 dump_printf_loc (MSG_NOTE, vect_location, "step: ");
437 dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
438 dump_printf (MSG_NOTE, ", init: ");
439 dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
440 dump_printf (MSG_NOTE, "\n");
443 *init = init_expr;
444 *step = step_expr;
446 if (TREE_CODE (step_expr) != INTEGER_CST
447 && (TREE_CODE (step_expr) != SSA_NAME
448 || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
449 && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
450 || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
451 && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
452 || !flag_associative_math)))
453 && (TREE_CODE (step_expr) != REAL_CST
454 || !flag_associative_math))
456 if (dump_enabled_p ())
457 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
458 "step unknown.\n");
459 return false;
462 return true;
465 /* Function vect_analyze_scalar_cycles_1.
467 Examine the cross iteration def-use cycles of scalar variables
468 in LOOP. LOOP_VINFO represents the loop that is now being
469 considered for vectorization (can be LOOP, or an outer-loop
470 enclosing LOOP). */
472 static void
473 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
475 basic_block bb = loop->header;
476 tree init, step;
477 auto_vec<gimple *, 64> worklist;
478 gphi_iterator gsi;
479 bool double_reduc;
481 DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
483 /* First - identify all inductions. Reduction detection assumes that all the
484 inductions have been identified, therefore, this order must not be
485 changed. */
486 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
488 gphi *phi = gsi.phi ();
489 tree access_fn = NULL;
490 tree def = PHI_RESULT (phi);
491 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
493 if (dump_enabled_p ())
495 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
496 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
499 /* Skip virtual phi's. The data dependences that are associated with
500 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
501 if (virtual_operand_p (def))
502 continue;
504 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
506 /* Analyze the evolution function. */
507 access_fn = analyze_scalar_evolution (loop, def);
508 if (access_fn)
510 STRIP_NOPS (access_fn);
511 if (dump_enabled_p ())
513 dump_printf_loc (MSG_NOTE, vect_location,
514 "Access function of PHI: ");
515 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
516 dump_printf (MSG_NOTE, "\n");
518 STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
519 = initial_condition_in_loop_num (access_fn, loop->num);
520 STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
521 = evolution_part_in_loop_num (access_fn, loop->num);
524 if (!access_fn
525 || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
526 || (LOOP_VINFO_LOOP (loop_vinfo) != loop
527 && TREE_CODE (step) != INTEGER_CST))
529 worklist.safe_push (phi);
530 continue;
533 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
534 != NULL_TREE);
535 gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
537 if (dump_enabled_p ())
538 dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
539 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
543 /* Second - identify all reductions and nested cycles. */
544 while (worklist.length () > 0)
546 gimple *phi = worklist.pop ();
547 tree def = PHI_RESULT (phi);
548 stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
550 if (dump_enabled_p ())
552 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
553 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
556 gcc_assert (!virtual_operand_p (def)
557 && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
559 stmt_vec_info reduc_stmt_info
560 = vect_force_simple_reduction (loop_vinfo, stmt_vinfo,
561 &double_reduc, false);
562 if (reduc_stmt_info)
564 if (double_reduc)
566 if (dump_enabled_p ())
567 dump_printf_loc (MSG_NOTE, vect_location,
568 "Detected double reduction.\n");
570 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
571 STMT_VINFO_DEF_TYPE (reduc_stmt_info)
572 = vect_double_reduction_def;
574 else
576 if (loop != LOOP_VINFO_LOOP (loop_vinfo))
578 if (dump_enabled_p ())
579 dump_printf_loc (MSG_NOTE, vect_location,
580 "Detected vectorizable nested cycle.\n");
582 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
583 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_nested_cycle;
585 else
587 if (dump_enabled_p ())
588 dump_printf_loc (MSG_NOTE, vect_location,
589 "Detected reduction.\n");
591 STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
592 STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
593 /* Store the reduction cycles for possible vectorization in
594 loop-aware SLP if it was not detected as reduction
595 chain. */
596 if (! REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info))
597 LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
598 (reduc_stmt_info);
602 else
603 if (dump_enabled_p ())
604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
605 "Unknown def-use cycle pattern.\n");
610 /* Function vect_analyze_scalar_cycles.
612 Examine the cross iteration def-use cycles of scalar variables, by
613 analyzing the loop-header PHIs of scalar variables. Classify each
614 cycle as one of the following: invariant, induction, reduction, unknown.
615 We do that for the loop represented by LOOP_VINFO, and also for its
616 inner-loop, if it exists.
617 Examples for scalar cycles:
619 Example1: reduction:
621 loop1:
622 for (i=0; i<N; i++)
623 sum += a[i];
625 Example2: induction:
627 loop2:
628 for (i=0; i<N; i++)
629 a[i] = i; */
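/* Editor's addition, a third example sketched in the same spirit:

   Example3: a cycle that typically stays "unknown":

     loop3:
     for (i=0; i<N; i++)
       x = a[x];

   x is carried across iterations, but its evolution is neither a
   simple induction nor a recognized reduction, so classification
   fails.  */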
631 static void
632 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
634 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
636 vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
638 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
639 Reductions in such inner-loop therefore have different properties than
640 the reductions in the nest that gets vectorized:
641 1. When vectorized, they are executed in the same order as in the original
642 scalar loop, so we can't change the order of computation when
643 vectorizing them.
644 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
645 current checks are too strict. */
647 if (loop->inner)
648 vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
651 /* Transfer group and reduction information from STMT to its pattern stmt. */
653 static void
654 vect_fixup_reduc_chain (gimple *stmt)
656 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
657 stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
658 stmt_vec_info stmtp;
659 gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
660 && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
661 REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
664 stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
665 REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
666 stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
667 if (stmt)
668 REDUC_GROUP_NEXT_ELEMENT (stmtp)
669 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
671 while (stmt);
672 STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
675 /* Fixup scalar cycles that now have their stmts detected as patterns. */
677 static void
678 vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
680 gimple *first;
681 unsigned i;
683 FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
684 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
686 gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
687 while (next)
689 if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
690 break;
691 next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
693 /* If not all stmts in the chain are patterns, try to handle
694 the chain without patterns. */
695 if (! next)
697 vect_fixup_reduc_chain (first);
698 LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
699 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
704 /* Function vect_get_loop_niters.
706 Determine how many iterations the loop is executed and place it
707 in NUMBER_OF_ITERATIONS. Place the number of latch iterations
708 in NUMBER_OF_ITERATIONSM1. Place the condition under which the
709 niter information holds in ASSUMPTIONS.
711 Return the loop exit condition. */
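/* For example (editor's note): for a loop whose body executes n > 0
   times, NUMBER_OF_ITERATIONSM1 is the latch count n - 1 and
   NUMBER_OF_ITERATIONS is n; ASSUMPTIONS collects any conditions
   (for instance that the loop is entered at all) under which those
   expressions are valid.  */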
714 static gcond *
715 vect_get_loop_niters (struct loop *loop, tree *assumptions,
716 tree *number_of_iterations, tree *number_of_iterationsm1)
718 edge exit = single_exit (loop);
719 struct tree_niter_desc niter_desc;
720 tree niter_assumptions, niter, may_be_zero;
721 gcond *cond = get_loop_exit_condition (loop);
723 *assumptions = boolean_true_node;
724 *number_of_iterationsm1 = chrec_dont_know;
725 *number_of_iterations = chrec_dont_know;
726 DUMP_VECT_SCOPE ("get_loop_niters");
728 if (!exit)
729 return cond;
731 niter = chrec_dont_know;
732 may_be_zero = NULL_TREE;
733 niter_assumptions = boolean_true_node;
734 if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
735 || chrec_contains_undetermined (niter_desc.niter))
736 return cond;
738 niter_assumptions = niter_desc.assumptions;
739 may_be_zero = niter_desc.may_be_zero;
740 niter = niter_desc.niter;
742 if (may_be_zero && integer_zerop (may_be_zero))
743 may_be_zero = NULL_TREE;
745 if (may_be_zero)
747 if (COMPARISON_CLASS_P (may_be_zero))
749 /* Try to combine may_be_zero with assumptions, this can simplify
750 computation of niter expression. */
751 if (niter_assumptions && !integer_nonzerop (niter_assumptions))
752 niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
753 niter_assumptions,
754 fold_build1 (TRUTH_NOT_EXPR,
755 boolean_type_node,
756 may_be_zero));
757 else
758 niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
759 build_int_cst (TREE_TYPE (niter), 0),
760 rewrite_to_non_trapping_overflow (niter));
762 may_be_zero = NULL_TREE;
764 else if (integer_nonzerop (may_be_zero))
766 *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
767 *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
768 return cond;
770 else
771 return cond;
774 *assumptions = niter_assumptions;
775 *number_of_iterationsm1 = niter;
777 /* We want the number of loop header executions which is the number
778 of latch executions plus one.
779 ??? For UINT_MAX latch executions this number overflows to zero
780 for loops like do { n++; } while (n != 0); */
781 if (niter && !chrec_contains_undetermined (niter))
782 niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
783 build_int_cst (TREE_TYPE (niter), 1));
784 *number_of_iterations = niter;
786 return cond;
789 /* Function bb_in_loop_p
791 Used as predicate for dfs order traversal of the loop bbs. */
793 static bool
794 bb_in_loop_p (const_basic_block bb, const void *data)
796 const struct loop *const loop = (const struct loop *)data;
797 if (flow_bb_inside_loop_p (loop, bb))
798 return true;
799 return false;
803 /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
804 stmt_vec_info structs for all the stmts in LOOP_IN. */
806 _loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
807 : vec_info (vec_info::loop, init_cost (loop_in), shared),
808 loop (loop_in),
809 bbs (XCNEWVEC (basic_block, loop->num_nodes)),
810 num_itersm1 (NULL_TREE),
811 num_iters (NULL_TREE),
812 num_iters_unchanged (NULL_TREE),
813 num_iters_assumptions (NULL_TREE),
814 th (0),
815 versioning_threshold (0),
816 vectorization_factor (0),
817 max_vectorization_factor (0),
818 mask_skip_niters (NULL_TREE),
819 mask_compare_type (NULL_TREE),
820 unaligned_dr (NULL),
821 peeling_for_alignment (0),
822 ptr_mask (0),
823 ivexpr_map (NULL),
824 slp_unrolling_factor (1),
825 single_scalar_iteration_cost (0),
826 vectorizable (false),
827 can_fully_mask_p (true),
828 fully_masked_p (false),
829 peeling_for_gaps (false),
830 peeling_for_niter (false),
831 operands_swapped (false),
832 no_data_dependencies (false),
833 has_mask_store (false),
834 scalar_loop (NULL),
835 orig_loop_info (NULL)
837 /* Create/Update stmt_info for all stmts in the loop. */
838 basic_block *body = get_loop_body (loop);
839 for (unsigned int i = 0; i < loop->num_nodes; i++)
841 basic_block bb = body[i];
842 gimple_stmt_iterator si;
844 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
846 gimple *phi = gsi_stmt (si);
847 gimple_set_uid (phi, 0);
848 add_stmt (phi);
851 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
853 gimple *stmt = gsi_stmt (si);
854 gimple_set_uid (stmt, 0);
855 add_stmt (stmt);
858 free (body);
860 /* CHECKME: We want to visit all BBs before their successors (except for
861 latch blocks, for which this assertion wouldn't hold). In the simple
862 case of the loop forms we allow, a dfs order of the BBs would be the same
863 as a reversed postorder traversal, so we are safe.
865 unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
866 bbs, loop->num_nodes, loop);
867 gcc_assert (nbbs == loop->num_nodes);
870 /* Free all levels of MASKS. */
872 void
873 release_vec_loop_masks (vec_loop_masks *masks)
875 rgroup_masks *rgm;
876 unsigned int i;
877 FOR_EACH_VEC_ELT (*masks, i, rgm)
878 rgm->masks.release ();
879 masks->release ();
882 /* Free all memory used by the _loop_vec_info, as well as all the
883 stmt_vec_info structs of all the stmts in the loop. */
885 _loop_vec_info::~_loop_vec_info ()
887 int nbbs;
888 gimple_stmt_iterator si;
889 int j;
891 /* ??? We're releasing loop_vinfos en-block. */
892 set_stmt_vec_info_vec (&stmt_vec_infos);
893 nbbs = loop->num_nodes;
894 for (j = 0; j < nbbs; j++)
896 basic_block bb = bbs[j];
897 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
898 free_stmt_vec_info (gsi_stmt (si));
900 for (si = gsi_start_bb (bb); !gsi_end_p (si); )
902 gimple *stmt = gsi_stmt (si);
904 /* We may have broken canonical form by moving a constant
905 into RHS1 of a commutative op. Fix such occurrences. */
906 if (operands_swapped && is_gimple_assign (stmt))
908 enum tree_code code = gimple_assign_rhs_code (stmt);
910 if ((code == PLUS_EXPR
911 || code == POINTER_PLUS_EXPR
912 || code == MULT_EXPR)
913 && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
914 swap_ssa_operands (stmt,
915 gimple_assign_rhs1_ptr (stmt),
916 gimple_assign_rhs2_ptr (stmt));
917 else if (code == COND_EXPR
918 && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
920 tree cond_expr = gimple_assign_rhs1 (stmt);
921 enum tree_code cond_code = TREE_CODE (cond_expr);
923 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
925 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
926 0));
927 cond_code = invert_tree_comparison (cond_code,
928 honor_nans);
929 if (cond_code != ERROR_MARK)
931 TREE_SET_CODE (cond_expr, cond_code);
932 swap_ssa_operands (stmt,
933 gimple_assign_rhs2_ptr (stmt),
934 gimple_assign_rhs3_ptr (stmt));
940 /* Free stmt_vec_info. */
941 free_stmt_vec_info (stmt);
942 gsi_next (&si);
946 free (bbs);
948 release_vec_loop_masks (&masks);
949 delete ivexpr_map;
951 loop->aux = NULL;
954 /* Return an invariant or register for EXPR and emit necessary
955 computations in the LOOP_VINFO loop preheader. */
957 tree
958 cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
960 if (is_gimple_reg (expr)
961 || is_gimple_min_invariant (expr))
962 return expr;
964 if (! loop_vinfo->ivexpr_map)
965 loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
966 tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
967 if (! cached)
969 gimple_seq stmts = NULL;
970 cached = force_gimple_operand (unshare_expr (expr),
971 &stmts, true, NULL_TREE);
972 if (stmts)
974 edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
975 gsi_insert_seq_on_edge_immediate (e, stmts);
978 return cached;
981 /* Return true if we can use CMP_TYPE as the comparison type to produce
982 all masks required to mask LOOP_VINFO. */
984 static bool
985 can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
987 rgroup_masks *rgm;
988 unsigned int i;
989 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
990 if (rgm->mask_type != NULL_TREE
991 && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
992 cmp_type, rgm->mask_type,
993 OPTIMIZE_FOR_SPEED))
994 return false;
995 return true;
998 /* Calculate the maximum number of scalars per iteration for every
999 rgroup in LOOP_VINFO. */
1001 static unsigned int
1002 vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
1004 unsigned int res = 1;
1005 unsigned int i;
1006 rgroup_masks *rgm;
1007 FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
1008 res = MAX (res, rgm->max_nscalars_per_iter);
1009 return res;
1012 /* Each statement in LOOP_VINFO can be masked where necessary. Check
1013 whether we can actually generate the masks required. Return true if so,
1014 storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
1016 static bool
1017 vect_verify_full_masking (loop_vec_info loop_vinfo)
1019 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1020 unsigned int min_ni_width;
1022 /* Use a normal loop if there are no statements that need masking.
1023 This only happens in rare degenerate cases: it means that the loop
1024 has no loads, no stores, and no live-out values. */
1025 if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
1026 return false;
1028 /* Get the maximum number of iterations that is representable
1029 in the counter type. */
1030 tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
1031 widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
1033 /* Get a more refined estimate for the number of iterations. */
1034 widest_int max_back_edges;
1035 if (max_loop_iterations (loop, &max_back_edges))
1036 max_ni = wi::smin (max_ni, max_back_edges + 1);
1038 /* Account for rgroup masks, in which each bit is replicated N times. */
1039 max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
1041 /* Work out how many bits we need to represent the limit. */
1042 min_ni_width = wi::min_precision (max_ni, UNSIGNED);
1044 /* Find a scalar mode for which WHILE_ULT is supported. */
1045 opt_scalar_int_mode cmp_mode_iter;
1046 tree cmp_type = NULL_TREE;
1047 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
1049 unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
1050 if (cmp_bits >= min_ni_width
1051 && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
1053 tree this_type = build_nonstandard_integer_type (cmp_bits, true);
1054 if (this_type
1055 && can_produce_all_loop_masks_p (loop_vinfo, this_type))
1057 /* Although we could stop as soon as we find a valid mode,
1058 it's often better to continue until we hit Pmode, since the
1059 operands to the WHILE are more likely to be reusable in
1060 address calculations. */
1061 cmp_type = this_type;
1062 if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
1063 break;
1068 if (!cmp_type)
1069 return false;
1071 LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
1072 return true;
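/* A worked example of the width computation above (editor's sketch;
   the numbers are made up): if the niter type is a 32-bit unsigned
   type but the loop is known to run at most 999 latch iterations,
   max_ni is refined to 1000.  With at most 2 scalars per iteration in
   some rgroup the limit becomes 2000, which needs 11 bits, so we pick
   the narrowest integer mode of at least 11 bits for which the target
   can produce all the required WHILE_ULT masks, widening further up
   to Pmode when possible.  */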
1075 /* Calculate the cost of one scalar iteration of the loop. */
1076 static void
1077 vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
1079 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1080 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1081 int nbbs = loop->num_nodes, factor;
1082 int innerloop_iters, i;
1084 /* Gather costs for statements in the scalar loop. */
1086 /* FORNOW. */
1087 innerloop_iters = 1;
1088 if (loop->inner)
1089 innerloop_iters = 50; /* FIXME */
1091 for (i = 0; i < nbbs; i++)
1093 gimple_stmt_iterator si;
1094 basic_block bb = bbs[i];
1096 if (bb->loop_father == loop->inner)
1097 factor = innerloop_iters;
1098 else
1099 factor = 1;
1101 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1103 gimple *stmt = gsi_stmt (si);
1104 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
1106 if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
1107 continue;
1109 /* Skip stmts that are not vectorized inside the loop. */
1110 if (stmt_info
1111 && !STMT_VINFO_RELEVANT_P (stmt_info)
1112 && (!STMT_VINFO_LIVE_P (stmt_info)
1113 || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1114 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
1115 continue;
1117 vect_cost_for_stmt kind;
1118 if (STMT_VINFO_DATA_REF (stmt_info))
1120 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
1121 kind = scalar_load;
1122 else
1123 kind = scalar_store;
1125 else
1126 kind = scalar_stmt;
1128 record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1129 factor, kind, stmt_info, 0, vect_prologue);
1133 /* Now accumulate cost. */
1134 void *target_cost_data = init_cost (loop);
1135 stmt_info_for_cost *si;
1136 int j;
1137 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
1138 j, si)
1140 struct _stmt_vec_info *stmt_info
1141 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
1142 (void) add_stmt_cost (target_cost_data, si->count,
1143 si->kind, stmt_info, si->misalign,
1144 vect_body);
1146 unsigned dummy, body_cost = 0;
1147 finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
1148 destroy_cost_data (target_cost_data);
1149 LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
1153 /* Function vect_analyze_loop_form_1.
1155 Verify that certain CFG restrictions hold, including:
1156 - the loop has a pre-header
1157 - the loop has a single entry and exit
1158 - the loop exit condition is simple enough
1159 - the number of iterations can be analyzed, i.e, a countable loop. The
1160 niter could be analyzed under some assumptions. */
1162 bool
1163 vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
1164 tree *assumptions, tree *number_of_iterationsm1,
1165 tree *number_of_iterations, gcond **inner_loop_cond)
1167 DUMP_VECT_SCOPE ("vect_analyze_loop_form");
1169 /* Different restrictions apply when we are considering an inner-most loop,
1170 vs. an outer (nested) loop.
1171 (FORNOW. May want to relax some of these restrictions in the future). */
1173 if (!loop->inner)
1175 /* Inner-most loop. We currently require that the number of BBs is
1176 exactly 2 (the header and latch). Vectorizable inner-most loops
1177 look like this:
1179 (pre-header)
1181 header <--------+
1182 | | |
1183 | +--> latch --+
1185 (exit-bb) */
1187 if (loop->num_nodes != 2)
1189 if (dump_enabled_p ())
1190 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1191 "not vectorized: control flow in loop.\n");
1192 return false;
1195 if (empty_block_p (loop->header))
1197 if (dump_enabled_p ())
1198 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1199 "not vectorized: empty loop.\n");
1200 return false;
1203 else
1205 struct loop *innerloop = loop->inner;
1206 edge entryedge;
1208 /* Nested loop. We currently require that the loop is doubly-nested,
1209 contains a single inner loop, and the number of BBs is exactly 5.
1210 Vectorizable outer-loops look like this:
1212 (pre-header)
1214 header <---+
1216 inner-loop |
1218 tail ------+
1220 (exit-bb)
1222 The inner-loop has the properties expected of inner-most loops
1223 as described above. */
1225 if ((loop->inner)->inner || (loop->inner)->next)
1227 if (dump_enabled_p ())
1228 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1229 "not vectorized: multiple nested loops.\n");
1230 return false;
1233 if (loop->num_nodes != 5)
1235 if (dump_enabled_p ())
1236 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1237 "not vectorized: control flow in loop.\n");
1238 return false;
1241 entryedge = loop_preheader_edge (innerloop);
1242 if (entryedge->src != loop->header
1243 || !single_exit (innerloop)
1244 || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
1246 if (dump_enabled_p ())
1247 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1248 "not vectorized: unsupported outerloop form.\n");
1249 return false;
1252 /* Analyze the inner-loop. */
1253 tree inner_niterm1, inner_niter, inner_assumptions;
1254 if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
1255 &inner_assumptions, &inner_niterm1,
1256 &inner_niter, NULL)
1257 /* Don't support analyzing niter under assumptions for inner
1258 loop. */
1259 || !integer_onep (inner_assumptions))
1261 if (dump_enabled_p ())
1262 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1263 "not vectorized: Bad inner loop.\n");
1264 return false;
1267 if (!expr_invariant_in_loop_p (loop, inner_niter))
1269 if (dump_enabled_p ())
1270 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1271 "not vectorized: inner-loop count not"
1272 " invariant.\n");
1273 return false;
1276 if (dump_enabled_p ())
1277 dump_printf_loc (MSG_NOTE, vect_location,
1278 "Considering outer-loop vectorization.\n");
1281 if (!single_exit (loop)
1282 || EDGE_COUNT (loop->header->preds) != 2)
1284 if (dump_enabled_p ())
1286 if (!single_exit (loop))
1287 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1288 "not vectorized: multiple exits.\n");
1289 else if (EDGE_COUNT (loop->header->preds) != 2)
1290 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1291 "not vectorized: too many incoming edges.\n");
1293 return false;
1296 /* We assume that the loop exit condition is at the end of the loop, i.e.,
1297 that the loop is represented as a do-while (with a proper if-guard
1298 before the loop if needed), where the loop header contains all the
1299 executable statements, and the latch is empty. */
1300 if (!empty_block_p (loop->latch)
1301 || !gimple_seq_empty_p (phi_nodes (loop->latch)))
1303 if (dump_enabled_p ())
1304 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1305 "not vectorized: latch block not empty.\n");
1306 return false;
1309 /* Make sure the exit is not abnormal. */
1310 edge e = single_exit (loop);
1311 if (e->flags & EDGE_ABNORMAL)
1313 if (dump_enabled_p ())
1314 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1315 "not vectorized: abnormal loop exit edge.\n");
1316 return false;
1319 *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
1320 number_of_iterationsm1);
1321 if (!*loop_cond)
1323 if (dump_enabled_p ())
1324 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1325 "not vectorized: complicated exit condition.\n");
1326 return false;
1329 if (integer_zerop (*assumptions)
1330 || !*number_of_iterations
1331 || chrec_contains_undetermined (*number_of_iterations))
1333 if (dump_enabled_p ())
1334 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1335 "not vectorized: number of iterations cannot be "
1336 "computed.\n");
1337 return false;
1340 if (integer_zerop (*number_of_iterations))
1342 if (dump_enabled_p ())
1343 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1344 "not vectorized: number of iterations = 0.\n");
1345 return false;
1348 return true;
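/* For example (editor's sketch): an inner-most loop such as

     for (i = 0; i < n; i++)
       if (a[i] > 0)
         a[i] = 0;

   has more than the two basic blocks allowed above unless if-conversion
   has already flattened the branch, so it is rejected here with
   "not vectorized: control flow in loop.".  */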
1351 /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
1353 loop_vec_info
1354 vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
1356 tree assumptions, number_of_iterations, number_of_iterationsm1;
1357 gcond *loop_cond, *inner_loop_cond = NULL;
1359 if (! vect_analyze_loop_form_1 (loop, &loop_cond,
1360 &assumptions, &number_of_iterationsm1,
1361 &number_of_iterations, &inner_loop_cond))
1362 return NULL;
1364 loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
1365 LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
1366 LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
1367 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
1368 if (!integer_onep (assumptions))
1370 /* We consider to vectorize this loop by versioning it under
1371 some assumptions. In order to do this, we need to clear
1372 existing information computed by scev and niter analyzer. */
1373 scev_reset_htab ();
1374 free_numbers_of_iterations_estimates (loop);
1375 /* Also set flag for this loop so that following scev and niter
1376 analysis are done under the assumptions. */
1377 loop_constraint_set (loop, LOOP_C_FINITE);
1378 /* Also record the assumptions for versioning. */
1379 LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
1382 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1384 if (dump_enabled_p ())
1386 dump_printf_loc (MSG_NOTE, vect_location,
1387 "Symbolic number of iterations is ");
1388 dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
1389 dump_printf (MSG_NOTE, "\n");
1393 stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
1394 STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
1395 if (inner_loop_cond)
1397 stmt_vec_info inner_loop_cond_info
1398 = loop_vinfo->lookup_stmt (inner_loop_cond);
1399 STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
1402 gcc_assert (!loop->aux);
1403 loop->aux = loop_vinfo;
1404 return loop_vinfo;
1409 /* Scan the loop stmts and dependent on whether there are any (non-)SLP
1410 statements update the vectorization factor. */
1412 static void
1413 vect_update_vf_for_slp (loop_vec_info loop_vinfo)
1415 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1416 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1417 int nbbs = loop->num_nodes;
1418 poly_uint64 vectorization_factor;
1419 int i;
1421 DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
1423 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1424 gcc_assert (known_ne (vectorization_factor, 0U));
1426 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
1427 vectorization factor of the loop is the unrolling factor required by
1428 the SLP instances. If that unrolling factor is 1, we say that we
1429 perform pure SLP on the loop - cross-iteration parallelism is not
1430 exploited. */
1431 bool only_slp_in_loop = true;
1432 for (i = 0; i < nbbs; i++)
1434 basic_block bb = bbs[i];
1435 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1436 gsi_next (&si))
1438 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
1439 if (STMT_VINFO_IN_PATTERN_P (stmt_info)
1440 && STMT_VINFO_RELATED_STMT (stmt_info))
1441 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
1442 if ((STMT_VINFO_RELEVANT_P (stmt_info)
1443 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
1444 && !PURE_SLP_STMT (stmt_info))
1445 /* STMT needs both SLP and loop-based vectorization. */
1446 only_slp_in_loop = false;
1450 if (only_slp_in_loop)
1452 dump_printf_loc (MSG_NOTE, vect_location,
1453 "Loop contains only SLP stmts\n");
1454 vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
1456 else
1458 dump_printf_loc (MSG_NOTE, vect_location,
1459 "Loop contains SLP and non-SLP stmts\n");
1460 /* Both the vectorization factor and unroll factor have the form
1461 current_vector_size * X for some rational X, so they must have
1462 a common multiple. */
1463 vectorization_factor
1464 = force_common_multiple (vectorization_factor,
1465 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
1468 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
1469 if (dump_enabled_p ())
1471 dump_printf_loc (MSG_NOTE, vect_location,
1472 "Updating vectorization factor to ");
1473 dump_dec (MSG_NOTE, vectorization_factor);
1474 dump_printf (MSG_NOTE, ".\n");
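/* For example (editor's note; the factors are made up): if loop-based
   analysis arrived at a vectorization factor of 4 while the SLP
   instances require an unrolling factor of 6, the updated
   vectorization factor is their least common multiple, 12.  */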
1478 /* Return true if STMT_INFO describes a double reduction phi and if
1479 the other phi in the reduction is also relevant for vectorization.
1480 This rejects cases such as:
1482 outer1:
1483 x_1 = PHI <x_3(outer2), ...>;
1486 inner:
1487 x_2 = ...;
1490 outer2:
1491 x_3 = PHI <x_2(inner)>;
1493 if nothing in x_2 or elsewhere makes x_1 relevant. */
1495 static bool
1496 vect_active_double_reduction_p (stmt_vec_info stmt_info)
1498 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
1499 return false;
1501 return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
1504 /* Function vect_analyze_loop_operations.
1506 Scan the loop stmts and make sure they are all vectorizable. */
1508 static bool
1509 vect_analyze_loop_operations (loop_vec_info loop_vinfo)
1511 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1512 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
1513 int nbbs = loop->num_nodes;
1514 int i;
1515 stmt_vec_info stmt_info;
1516 bool need_to_vectorize = false;
1517 bool ok;
1519 DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
1521 stmt_vector_for_cost cost_vec;
1522 cost_vec.create (2);
1524 for (i = 0; i < nbbs; i++)
1526 basic_block bb = bbs[i];
1528 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
1529 gsi_next (&si))
1531 gphi *phi = si.phi ();
1532 ok = true;
1534 stmt_info = loop_vinfo->lookup_stmt (phi);
1535 if (dump_enabled_p ())
1537 dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
1538 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1540 if (virtual_operand_p (gimple_phi_result (phi)))
1541 continue;
1543 /* Inner-loop loop-closed exit phi in outer-loop vectorization
1544 (i.e., a phi in the tail of the outer-loop). */
1545 if (! is_loop_header_bb_p (bb))
1547 /* FORNOW: we currently don't support the case that these phis
1548 are not used in the outerloop (unless it is double reduction,
1549 i.e., this phi is vect_reduction_def), because this case
1550 requires us to actually do something here. */
1551 if (STMT_VINFO_LIVE_P (stmt_info)
1552 && !vect_active_double_reduction_p (stmt_info))
1554 if (dump_enabled_p ())
1555 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1556 "Unsupported loop-closed phi in "
1557 "outer-loop.\n");
1558 return false;
1561 /* If PHI is used in the outer loop, we check that its operand
1562 is defined in the inner loop. */
1563 if (STMT_VINFO_RELEVANT_P (stmt_info))
1565 tree phi_op;
1567 if (gimple_phi_num_args (phi) != 1)
1568 return false;
1570 phi_op = PHI_ARG_DEF (phi, 0);
1571 stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
1572 if (!op_def_info)
1573 return false;
1575 if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
1576 && (STMT_VINFO_RELEVANT (op_def_info)
1577 != vect_used_in_outer_by_reduction))
1578 return false;
1581 continue;
1584 gcc_assert (stmt_info);
1586 if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
1587 || STMT_VINFO_LIVE_P (stmt_info))
1588 && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
1590 /* A scalar-dependence cycle that we don't support. */
1591 if (dump_enabled_p ())
1592 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1593 "not vectorized: scalar dependence cycle.\n");
1594 return false;
1597 if (STMT_VINFO_RELEVANT_P (stmt_info))
1599 need_to_vectorize = true;
1600 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
1601 && ! PURE_SLP_STMT (stmt_info))
1602 ok = vectorizable_induction (phi, NULL, NULL, NULL, &cost_vec);
1603 else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
1604 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
1605 && ! PURE_SLP_STMT (stmt_info))
1606 ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL,
1607 &cost_vec);
1610 /* SLP PHIs are tested by vect_slp_analyze_node_operations. */
1611 if (ok
1612 && STMT_VINFO_LIVE_P (stmt_info)
1613 && !PURE_SLP_STMT (stmt_info))
1614 ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL,
1615 &cost_vec);
1617 if (!ok)
1619 if (dump_enabled_p ())
1621 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1622 "not vectorized: relevant phi not "
1623 "supported: ");
1624 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
1626 return false;
1630 for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
1631 gsi_next (&si))
1633 gimple *stmt = gsi_stmt (si);
1634 if (!gimple_clobber_p (stmt)
1635 && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL,
1636 &cost_vec))
1637 return false;
1639 } /* bbs */
1641 add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
1642 cost_vec.release ();
1644 /* All operations in the loop are either irrelevant (deal with loop
1645 control, or dead), or only used outside the loop and can be moved
1646 out of the loop (e.g. invariants, inductions). The loop can be
1647 optimized away by scalar optimizations. We're better off not
1648 touching this loop. */
1649 if (!need_to_vectorize)
1651 if (dump_enabled_p ())
1652 dump_printf_loc (MSG_NOTE, vect_location,
1653 "All the computation can be taken out of the loop.\n");
1654 if (dump_enabled_p ())
1655 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1656 "not vectorized: redundant loop. no profit to "
1657 "vectorize.\n");
1658 return false;
1661 return true;
1664 /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
1665 is worthwhile to vectorize. Return 1 if definitely yes, 0 if
1666 definitely no, or -1 if it's worth retrying. */
1668 static int
1669 vect_analyze_loop_costing (loop_vec_info loop_vinfo)
1671 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1672 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
1674 /* Only fully-masked loops can have iteration counts less than the
1675 vectorization factor. */
1676 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
1678 HOST_WIDE_INT max_niter;
1680 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
1681 max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
1682 else
1683 max_niter = max_stmt_executions_int (loop);
1685 if (max_niter != -1
1686 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
1688 if (dump_enabled_p ())
1689 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1690 "not vectorized: iteration count smaller than "
1691 "vectorization factor.\n");
1692 return 0;
1696 int min_profitable_iters, min_profitable_estimate;
1697 vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
1698 &min_profitable_estimate);
1700 if (min_profitable_iters < 0)
1702 if (dump_enabled_p ())
1703 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1704 "not vectorized: vectorization not profitable.\n");
1705 if (dump_enabled_p ())
1706 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1707 "not vectorized: vector version will never be "
1708 "profitable.\n");
1709 return -1;
1712 int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
1713 * assumed_vf);
1715 /* Use the cost model only if it is more conservative than user specified
1716 threshold. */
1717 unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
1718 min_profitable_iters);
1720 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
1722 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
1723 && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
1725 if (dump_enabled_p ())
1726 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1727 "not vectorized: vectorization not profitable.\n");
1728 if (dump_enabled_p ())
1729 dump_printf_loc (MSG_NOTE, vect_location,
1730 "not vectorized: iteration count smaller than user "
1731 "specified loop bound parameter or minimum profitable "
1732 "iterations (whichever is more conservative).\n");
1733 return 0;
1736 HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
1737 if (estimated_niter == -1)
1738 estimated_niter = likely_max_stmt_executions_int (loop);
1739 if (estimated_niter != -1
1740 && ((unsigned HOST_WIDE_INT) estimated_niter
1741 < MAX (th, (unsigned) min_profitable_estimate)))
1743 if (dump_enabled_p ())
1744 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1745 "not vectorized: estimated iteration count too "
1746 "small.\n");
1747 if (dump_enabled_p ())
1748 dump_printf_loc (MSG_NOTE, vect_location,
1749 "not vectorized: estimated iteration count smaller "
1750 "than specified loop bound parameter or minimum "
1751 "profitable iterations (whichever is more "
1752 "conservative).\n");
1753 return -1;
1756 return 1;
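/* A worked example of the threshold logic above (editor's sketch; the
   parameter values are made up): with --param min-vect-loop-bound=3,
   an assumed VF of 4 and a cost-model result of min_profitable_iters
   == 7, min_scalar_loop_bound is 12, so the recorded threshold is
   MAX (12, 7) == 12 and a loop known to iterate fewer than 12 times
   is rejected as not profitable.  */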
1759 static bool
1760 vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
1761 vec<data_reference_p> *datarefs,
1762 unsigned int *n_stmts)
1764 *n_stmts = 0;
1765 for (unsigned i = 0; i < loop->num_nodes; i++)
1766 for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
1767 !gsi_end_p (gsi); gsi_next (&gsi))
1769 gimple *stmt = gsi_stmt (gsi);
1770 if (is_gimple_debug (stmt))
1771 continue;
1772 ++(*n_stmts);
1773 if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
1775 if (is_gimple_call (stmt) && loop->safelen)
1777 tree fndecl = gimple_call_fndecl (stmt), op;
1778 if (fndecl != NULL_TREE)
1780 cgraph_node *node = cgraph_node::get (fndecl);
1781 if (node != NULL && node->simd_clones != NULL)
1783 unsigned int j, n = gimple_call_num_args (stmt);
1784 for (j = 0; j < n; j++)
1786 op = gimple_call_arg (stmt, j);
1787 if (DECL_P (op)
1788 || (REFERENCE_CLASS_P (op)
1789 && get_base_address (op)))
1790 break;
1792 op = gimple_call_lhs (stmt);
1793 /* Ignore #pragma omp declare simd functions
1794 if they don't have data references in the
1795 call stmt itself. */
1796 if (j == n
1797 && !(op
1798 && (DECL_P (op)
1799 || (REFERENCE_CLASS_P (op)
1800 && get_base_address (op)))))
1801 continue;
1805 return false;
1807 /* If dependence analysis will give up due to the limit on the
1808 number of datarefs stop here and fail fatally. */
1809 if (datarefs->length ()
1810 > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
1811 return false;
1813 return true;
1816 /* Function vect_analyze_loop_2.
1818 Apply a set of analyses on LOOP, and create a loop_vec_info struct
1819 for it. The different analyses will record information in the
1820 loop_vec_info struct. */
1821 static bool
1822 vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
1824 bool ok;
1825 int res;
1826 unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
1827 poly_uint64 min_vf = 2;
1829 /* The first group of checks is independent of the vector size. */
1830 fatal = true;
1832 /* Find all data references in the loop (which correspond to vdefs/vuses)
1833 and analyze their evolution in the loop. */
1835 loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
1837 /* Gather the data references and count stmts in the loop. */
1838 if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
1840 if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
1841 &LOOP_VINFO_DATAREFS (loop_vinfo),
1842 n_stmts))
1844 if (dump_enabled_p ())
1845 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1846 "not vectorized: loop contains function "
1847 "calls or data references that cannot "
1848 "be analyzed\n");
1849 return false;
1851 loop_vinfo->shared->save_datarefs ();
1853 else
1854 loop_vinfo->shared->check_datarefs ();
1856 /* Analyze the data references and also adjust the minimal
1857 vectorization factor according to the loads and stores. */
1859 ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
1860 if (!ok)
1862 if (dump_enabled_p ())
1863 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1864 "bad data references.\n");
1865 return false;
1868 /* Classify all cross-iteration scalar data-flow cycles.
1869 Cross-iteration cycles caused by virtual phis are analyzed separately. */
1870 vect_analyze_scalar_cycles (loop_vinfo);
1872 vect_pattern_recog (loop_vinfo);
1874 vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
1876 /* Analyze the access patterns of the data-refs in the loop (consecutive,
1877 complex, etc.). FORNOW: Only handle consecutive access pattern. */
1879 ok = vect_analyze_data_ref_accesses (loop_vinfo);
1880 if (!ok)
1882 if (dump_enabled_p ())
1883 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1884 "bad data access.\n");
1885 return false;
1888 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
1890 ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
1891 if (!ok)
1893 if (dump_enabled_p ())
1894 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1895 "unexpected pattern.\n");
1896 return false;
1899 /* Analysis failures from this point on are no longer fatal: the rest of
1899 the analysis below can be retried, e.g. with SLP disabled or with a
1899 different vector size. */
1900 fatal = false;
1902 /* Analyze data dependences between the data-refs in the loop
1903 and adjust the maximum vectorization factor according to
1904 the dependences.
1905 FORNOW: fail at the first data dependence that we encounter. */
1907 ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
1908 if (!ok
1909 || (max_vf != MAX_VECTORIZATION_FACTOR
1910 && maybe_lt (max_vf, min_vf)))
1912 if (dump_enabled_p ())
1913 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1914 "bad data dependence.\n");
1915 return false;
1917 LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
1919 ok = vect_determine_vectorization_factor (loop_vinfo);
1920 if (!ok)
1922 if (dump_enabled_p ())
1923 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1924 "can't determine vectorization factor.\n");
1925 return false;
1927 if (max_vf != MAX_VECTORIZATION_FACTOR
1928 && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
1930 if (dump_enabled_p ())
1931 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1932 "bad data dependence.\n");
1933 return false;
1936 /* Compute the scalar iteration cost. */
1937 vect_compute_single_scalar_iteration_cost (loop_vinfo);
1939 poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1940 unsigned th;
1942 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
1943 ok = vect_analyze_slp (loop_vinfo, *n_stmts);
1944 if (!ok)
1945 return false;
1947 /* If there are any SLP instances mark them as pure_slp. */
1948 bool slp = vect_make_slp_decision (loop_vinfo);
1949 if (slp)
1951 /* Find stmts that need to be both vectorized and SLPed. */
1952 vect_detect_hybrid_slp (loop_vinfo);
1954 /* Update the vectorization factor based on the SLP decision. */
1955 vect_update_vf_for_slp (loop_vinfo);
1958 bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
1960 /* We don't expect to have to roll back to anything other than an empty
1961 set of rgroups. */
1962 gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
1964 /* This is the point where we can re-start analysis with SLP forced off. */
1965 start_over:
1967 /* Now the vectorization factor is final. */
1968 poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1969 gcc_assert (known_ne (vectorization_factor, 0U));
1971 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
1973 dump_printf_loc (MSG_NOTE, vect_location,
1974 "vectorization_factor = ");
1975 dump_dec (MSG_NOTE, vectorization_factor);
1976 dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
1977 LOOP_VINFO_INT_NITERS (loop_vinfo));
1980 HOST_WIDE_INT max_niter
1981 = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
1983 /* Analyze the alignment of the data-refs in the loop.
1984 Fail if a data reference is found that cannot be vectorized. */
1986 ok = vect_analyze_data_refs_alignment (loop_vinfo);
1987 if (!ok)
1989 if (dump_enabled_p ())
1990 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1991 "bad data alignment.\n");
1992 return false;
1995 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
1996 It is important to call pruning after vect_analyze_data_ref_accesses,
1997 since we use grouping information gathered by interleaving analysis. */
1998 ok = vect_prune_runtime_alias_test_list (loop_vinfo);
1999 if (!ok)
2000 return false;
2002 /* Do not invoke vect_enhance_data_refs_alignment for epilogue
2003 vectorization. */
2004 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
2006 /* This pass will decide on using loop versioning and/or loop peeling in
2007 order to enhance the alignment of data references in the loop. */
2008 ok = vect_enhance_data_refs_alignment (loop_vinfo);
2009 if (!ok)
2011 if (dump_enabled_p ())
2012 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2013 "bad data alignment.\n");
2014 return false;
2018 if (slp)
2020 /* Analyze operations in the SLP instances. Note this may
2021 remove unsupported SLP instances which makes the above
2022 SLP kind detection invalid. */
2023 unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
2024 vect_slp_analyze_operations (loop_vinfo);
2025 if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
2026 goto again;
2029 /* Scan all the remaining operations in the loop that are not subject
2030 to SLP and make sure they are vectorizable. */
2031 ok = vect_analyze_loop_operations (loop_vinfo);
2032 if (!ok)
2034 if (dump_enabled_p ())
2035 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2036 "bad operation or unsupported loop bound.\n");
2037 return false;
2040 /* Decide whether to use a fully-masked loop for this vectorization
2041 factor. */
2042 LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2043 = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
2044 && vect_verify_full_masking (loop_vinfo));
2045 if (dump_enabled_p ())
2047 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2048 dump_printf_loc (MSG_NOTE, vect_location,
2049 "using a fully-masked loop.\n");
2050 else
2051 dump_printf_loc (MSG_NOTE, vect_location,
2052 "not using a fully-masked loop.\n");
2055 /* If an epilog loop is required because of data accesses with gaps,
2056 one additional iteration needs to be peeled. Check if there are
2057 enough iterations for vectorization. */
2058 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2059 && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2060 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2062 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2063 tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
2065 if (known_lt (wi::to_widest (scalar_niters), vf))
2067 if (dump_enabled_p ())
2068 dump_printf_loc (MSG_NOTE, vect_location,
2069 "loop does not have enough iterations to support"
2070 " peeling for gaps.\n");
2071 return false;
2075 /* Check that the costings of the loop make vectorizing worthwhile. */
2076 res = vect_analyze_loop_costing (loop_vinfo);
2077 if (res < 0)
2078 goto again;
2079 if (!res)
2081 if (dump_enabled_p ())
2082 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2083 "Loop costings not worthwhile.\n");
2084 return false;
2087 /* Decide whether we need to create an epilogue loop to handle
2088 remaining scalar iterations. */
2089 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2091 unsigned HOST_WIDE_INT const_vf;
2092 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2093 /* The main loop handles all iterations. */
2094 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2095 else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2096 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
2098 if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
2099 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
2100 LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
2101 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
2103 else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2104 || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
2105 || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
2106 < (unsigned) exact_log2 (const_vf))
2107 /* In case of versioning, check if the maximum number of
2108 iterations is greater than th. If they are identical,
2109 the epilogue is unnecessary. */
2110 && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
2111 || ((unsigned HOST_WIDE_INT) max_niter
2112 > (th / const_vf) * const_vf))))
2113 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
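/* Worked example with assumed numbers (illustration only): if the
   vectorization factor is a compile-time constant 4 and NITERS is only
   known to be even (tree_ctz (NITERS) == 1), then 1 < exact_log2 (4) == 2,
   so the main vector loop may leave 0 or 2 scalar iterations behind and
   PEELING_FOR_NITER is set, unless loop versioning is used and the
   estimated maximum iteration count does not exceed the cost-model
   threshold rounded down to a multiple of the VF.  */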
2115 /* If an epilogue loop is required make sure we can create one. */
2116 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
2117 || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
2119 if (dump_enabled_p ())
2120 dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
2121 if (!vect_can_advance_ivs_p (loop_vinfo)
2122 || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
2123 single_exit (LOOP_VINFO_LOOP
2124 (loop_vinfo))))
2126 if (dump_enabled_p ())
2127 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2128 "not vectorized: can't create required "
2129 "epilog loop\n");
2130 goto again;
2134 /* During peeling, we need to check whether the number of loop iterations
2135 is enough for both the peeled prolog loop and the vector loop. This
2136 check can be merged with the threshold check of loop versioning, so
2137 increase the threshold for this case if necessary. */
2138 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
2140 poly_uint64 niters_th = 0;
2142 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
2144 /* Niters for peeled prolog loop. */
2145 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
2147 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
2148 tree vectype = STMT_VINFO_VECTYPE (vect_dr_stmt (dr));
2149 niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
2151 else
2152 niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
2155 /* Niters for at least one iteration of vectorized loop. */
2156 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
2157 niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2158 /* One additional iteration because of peeling for gap. */
2159 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
2160 niters_th += 1;
2161 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
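/* Worked example with assumed numbers (illustration only): with a known
   prologue peel count of 3 iterations, a vectorization factor of 8, no
   full masking and peeling for gaps required, the threshold computed
   above is 3 + 8 + 1 = 12, i.e. the runtime versioning check must also
   guarantee at least 12 scalar iterations so that the vector path can
   execute at least one full vector iteration.  */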
2164 gcc_assert (known_eq (vectorization_factor,
2165 LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
2167 /* Ok to vectorize! */
2168 return true;
2170 again:
2171 /* Try again with SLP forced off, but if we didn't do any SLP there is
2172 no point in re-trying. */
2173 if (!slp)
2174 return false;
2176 /* If there are reduction chains re-trying will fail anyway. */
2177 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
2178 return false;
2180 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2181 via interleaving or lane instructions. */
2182 slp_instance instance;
2183 slp_tree node;
2184 unsigned i, j;
2185 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
2187 stmt_vec_info vinfo;
2188 vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2189 if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
2190 continue;
2191 vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
2192 unsigned int size = DR_GROUP_SIZE (vinfo);
2193 tree vectype = STMT_VINFO_VECTYPE (vinfo);
2194 if (! vect_store_lanes_supported (vectype, size, false)
2195 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
2196 && ! vect_grouped_store_supported (vectype, size))
2197 return false;
2198 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
2200 vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
2201 vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
2202 bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
2203 size = DR_GROUP_SIZE (vinfo);
2204 vectype = STMT_VINFO_VECTYPE (vinfo);
2205 if (! vect_load_lanes_supported (vectype, size, false)
2206 && ! vect_grouped_load_supported (vectype, single_element_p,
2207 size))
2208 return false;
2212 if (dump_enabled_p ())
2213 dump_printf_loc (MSG_NOTE, vect_location,
2214 "re-trying with SLP disabled\n");
2216 /* Roll back state appropriately. No SLP this time. */
2217 slp = false;
2218 /* Restore the vectorization factor as it was without SLP. */
2219 LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
2220 /* Free the SLP instances. */
2221 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
2222 vect_free_slp_instance (instance, false);
2223 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
2224 /* Reset SLP type to loop_vect on all stmts. */
2225 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2227 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2228 for (gimple_stmt_iterator si = gsi_start_phis (bb);
2229 !gsi_end_p (si); gsi_next (&si))
2231 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2232 STMT_SLP_TYPE (stmt_info) = loop_vect;
2234 for (gimple_stmt_iterator si = gsi_start_bb (bb);
2235 !gsi_end_p (si); gsi_next (&si))
2237 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
2238 STMT_SLP_TYPE (stmt_info) = loop_vect;
2239 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2241 gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
2242 stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
2243 STMT_SLP_TYPE (stmt_info) = loop_vect;
2244 for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
2245 !gsi_end_p (pi); gsi_next (&pi))
2246 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
2247 = loop_vect;
2251 /* Free optimized alias test DDRS. */
2252 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
2253 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
2254 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
2255 /* Reset target cost data. */
2256 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
2257 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
2258 = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
2259 /* Reset accumulated rgroup information. */
2260 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
2261 /* Reset assorted flags. */
2262 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
2263 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
2264 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
2265 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
2266 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
2268 goto start_over;
2271 /* Function vect_analyze_loop.
2273 Apply a set of analyses on LOOP, and create a loop_vec_info struct
2274 for it. The different analyses will record information in the
2275 loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL, the loop being
2276 analyzed is an epilogue of ORIG_LOOP_VINFO and must be vectorized. */
2277 loop_vec_info
2278 vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
2279 vec_info_shared *shared)
2281 loop_vec_info loop_vinfo;
2282 auto_vector_sizes vector_sizes;
2284 /* Autodetect first vector size we try. */
2285 current_vector_size = 0;
2286 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
2287 unsigned int next_size = 0;
2289 DUMP_VECT_SCOPE ("analyze_loop_nest");
2291 if (loop_outer (loop)
2292 && loop_vec_info_for_loop (loop_outer (loop))
2293 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
2295 if (dump_enabled_p ())
2296 dump_printf_loc (MSG_NOTE, vect_location,
2297 "outer-loop already vectorized.\n");
2298 return NULL;
2301 if (!find_loop_nest (loop, &shared->loop_nest))
2303 if (dump_enabled_p ())
2304 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2305 "not vectorized: loop nest containing two "
2306 "or more consecutive inner loops cannot be "
2307 "vectorized\n");
2308 return NULL;
2311 unsigned n_stmts = 0;
2312 poly_uint64 autodetected_vector_size = 0;
2313 while (1)
2315 /* Check the CFG characteristics of the loop (nesting, entry/exit). */
2316 loop_vinfo = vect_analyze_loop_form (loop, shared);
2317 if (!loop_vinfo)
2319 if (dump_enabled_p ())
2320 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2321 "bad loop form.\n");
2322 return NULL;
2325 bool fatal = false;
2327 if (orig_loop_vinfo)
2328 LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
2330 if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
2332 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
2334 return loop_vinfo;
2337 delete loop_vinfo;
2339 if (next_size == 0)
2340 autodetected_vector_size = current_vector_size;
2342 if (next_size < vector_sizes.length ()
2343 && known_eq (vector_sizes[next_size], autodetected_vector_size))
2344 next_size += 1;
2346 if (fatal
2347 || next_size == vector_sizes.length ()
2348 || known_eq (current_vector_size, 0U))
2349 return NULL;
2351 /* Try the next biggest vector size. */
2352 current_vector_size = vector_sizes[next_size++];
2353 if (dump_enabled_p ())
2355 dump_printf_loc (MSG_NOTE, vect_location,
2356 "***** Re-trying analysis with "
2357 "vector size ");
2358 dump_dec (MSG_NOTE, current_vector_size);
2359 dump_printf (MSG_NOTE, "\n");
2364 /* Return true if there is an in-order reduction function for CODE, storing
2365 it in *REDUC_FN if so. */
2367 static bool
2368 fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
2370 switch (code)
2372 case PLUS_EXPR:
2373 *reduc_fn = IFN_FOLD_LEFT_PLUS;
2374 return true;
2376 default:
2377 return false;
2381 /* Function reduction_fn_for_scalar_code
2383 Input:
2384 CODE - tree_code of a reduction operation.
2386 Output:
2387 REDUC_FN - the corresponding internal function to be used to reduce the
2388 vector of partial results into a single scalar result, or IFN_LAST
2389 if the operation is a supported reduction operation, but does not have
2390 such an internal function.
2392 Return FALSE if CODE currently cannot be vectorized as reduction. */
2394 static bool
2395 reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
2397 switch (code)
2399 case MAX_EXPR:
2400 *reduc_fn = IFN_REDUC_MAX;
2401 return true;
2403 case MIN_EXPR:
2404 *reduc_fn = IFN_REDUC_MIN;
2405 return true;
2407 case PLUS_EXPR:
2408 *reduc_fn = IFN_REDUC_PLUS;
2409 return true;
2411 case BIT_AND_EXPR:
2412 *reduc_fn = IFN_REDUC_AND;
2413 return true;
2415 case BIT_IOR_EXPR:
2416 *reduc_fn = IFN_REDUC_IOR;
2417 return true;
2419 case BIT_XOR_EXPR:
2420 *reduc_fn = IFN_REDUC_XOR;
2421 return true;
2423 case MULT_EXPR:
2424 case MINUS_EXPR:
2425 *reduc_fn = IFN_LAST;
2426 return true;
2428 default:
2429 return false;
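/* Illustrative example (source shape assumed, not from the original
   sources): the scalar loop

     int m = INT_MIN;
     for (int i = 0; i < n; i++)
       m = a[i] > m ? a[i] : m;

   is a MAX_EXPR reduction, for which the function above returns
   IFN_REDUC_MAX: the vector loop keeps a vector accumulator of partial
   maxima and a single reduction after the loop produces the scalar
   result.  MULT_EXPR and MINUS_EXPR are accepted but report IFN_LAST,
   meaning the final reduction has to be open-coded.  */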
2433 /* If there is a neutral value X such that SLP reduction NODE would not
2434 be affected by the introduction of additional X elements, return that X,
2435 otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
2436 is true if the SLP statements perform a single reduction, false if each
2437 statement performs an independent reduction. */
2439 static tree
2440 neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
2441 bool reduc_chain)
2443 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2444 stmt_vec_info stmt_vinfo = stmts[0];
2445 tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
2446 tree scalar_type = TREE_TYPE (vector_type);
2447 struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
2448 gcc_assert (loop);
2450 switch (code)
2452 case WIDEN_SUM_EXPR:
2453 case DOT_PROD_EXPR:
2454 case SAD_EXPR:
2455 case PLUS_EXPR:
2456 case MINUS_EXPR:
2457 case BIT_IOR_EXPR:
2458 case BIT_XOR_EXPR:
2459 return build_zero_cst (scalar_type);
2461 case MULT_EXPR:
2462 return build_one_cst (scalar_type);
2464 case BIT_AND_EXPR:
2465 return build_all_ones_cst (scalar_type);
2467 case MAX_EXPR:
2468 case MIN_EXPR:
2469 /* For MIN/MAX the initial values are neutral. A reduction chain
2470 has only a single initial value, so that value is neutral for
2471 all statements. */
2472 if (reduc_chain)
2473 return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
2474 loop_preheader_edge (loop));
2475 return NULL_TREE;
2477 default:
2478 return NULL_TREE;
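/* Illustrative sketch (assumed example): for an SLP reduction such as

     sum += a[2 * i + 0];
     sum += a[2 * i + 1];

   the neutral value returned above for PLUS_EXPR is 0, so extra zero
   elements can be mixed into the vector accumulator without changing
   the final sum; for MULT_EXPR the padding value is 1 and for
   BIT_AND_EXPR it is all-ones, matching the cases handled above.  */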
2482 /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
2483 STMT is printed with a message MSG. */
2485 static void
2486 report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
2488 dump_printf_loc (msg_type, vect_location, "%s", msg);
2489 dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
2492 /* DEF_STMT_INFO occurs in a loop that contains a potential reduction
2493 operation. Return true if the results of DEF_STMT_INFO are something
2494 that can be accumulated by such a reduction. */
2496 static bool
2497 vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
2499 return (is_gimple_assign (def_stmt_info->stmt)
2500 || is_gimple_call (def_stmt_info->stmt)
2501 || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
2502 || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
2503 && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
2504 && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
2507 /* Detect SLP reduction of the form:
2509 #a1 = phi <a5, a0>
2510 a2 = operation (a1)
2511 a3 = operation (a2)
2512 a4 = operation (a3)
2513 a5 = operation (a4)
2515 #a = phi <a5>
2517 PHI is the reduction phi node (#a1 = phi <a5, a0> above)
2518 FIRST_STMT is the first reduction stmt in the chain
2519 (a2 = operation (a1)).
2521 Return TRUE if a reduction chain was detected. */
2523 static bool
2524 vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
2525 gimple *first_stmt)
2527 struct loop *loop = (gimple_bb (phi))->loop_father;
2528 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2529 enum tree_code code;
2530 gimple *loop_use_stmt = NULL, *first, *next_stmt;
2531 stmt_vec_info use_stmt_info, current_stmt_info = NULL;
2532 tree lhs;
2533 imm_use_iterator imm_iter;
2534 use_operand_p use_p;
2535 int nloop_uses, size = 0, n_out_of_loop_uses;
2536 bool found = false;
2538 if (loop != vect_loop)
2539 return false;
2541 lhs = PHI_RESULT (phi);
2542 code = gimple_assign_rhs_code (first_stmt);
2543 while (1)
2545 nloop_uses = 0;
2546 n_out_of_loop_uses = 0;
2547 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
2549 gimple *use_stmt = USE_STMT (use_p);
2550 if (is_gimple_debug (use_stmt))
2551 continue;
2553 /* Check if we got back to the reduction phi. */
2554 if (use_stmt == phi)
2556 loop_use_stmt = use_stmt;
2557 found = true;
2558 break;
2561 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2563 loop_use_stmt = use_stmt;
2564 nloop_uses++;
2566 else
2567 n_out_of_loop_uses++;
2569 /* There can be either a single use in the loop or two uses in
2570 phi nodes. */
2571 if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
2572 return false;
2575 if (found)
2576 break;
2578 /* We reached a statement with no loop uses. */
2579 if (nloop_uses == 0)
2580 return false;
2582 /* This is a loop exit phi, and we haven't reached the reduction phi. */
2583 if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
2584 return false;
2586 if (!is_gimple_assign (loop_use_stmt)
2587 || code != gimple_assign_rhs_code (loop_use_stmt)
2588 || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
2589 return false;
2591 /* Insert USE_STMT into reduction chain. */
2592 use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
2593 if (current_stmt_info)
2595 REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
2596 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
2597 = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2599 else
2600 REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
2602 lhs = gimple_assign_lhs (loop_use_stmt);
2603 current_stmt_info = use_stmt_info;
2604 size++;
2607 if (!found || loop_use_stmt != phi || size < 2)
2608 return false;
2610 /* Swap the operands, if needed, to make the reduction operand be the second
2611 operand. */
2612 lhs = PHI_RESULT (phi);
2613 next_stmt = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2614 while (next_stmt)
2616 if (gimple_assign_rhs2 (next_stmt) == lhs)
2618 tree op = gimple_assign_rhs1 (next_stmt);
2619 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2621 /* Check that the other def is either defined in the loop
2622 ("vect_internal_def"), or it's an induction (defined by a
2623 loop-header phi-node). */
2624 if (def_stmt_info
2625 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2626 && vect_valid_reduction_input_p (def_stmt_info))
2628 lhs = gimple_assign_lhs (next_stmt);
2629 next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2630 continue;
2633 return false;
2635 else
2637 tree op = gimple_assign_rhs2 (next_stmt);
2638 stmt_vec_info def_stmt_info = loop_info->lookup_def (op);
2640 /* Check that the other def is either defined in the loop
2641 ("vect_internal_def"), or it's an induction (defined by a
2642 loop-header phi-node). */
2643 if (def_stmt_info
2644 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
2645 && vect_valid_reduction_input_p (def_stmt_info))
2647 if (dump_enabled_p ())
2649 dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
2650 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
2653 swap_ssa_operands (next_stmt,
2654 gimple_assign_rhs1_ptr (next_stmt),
2655 gimple_assign_rhs2_ptr (next_stmt));
2656 update_stmt (next_stmt);
2658 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
2659 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
2661 else
2662 return false;
2665 lhs = gimple_assign_lhs (next_stmt);
2666 next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
2669 /* Save the chain for further analysis in SLP detection. */
2670 first = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
2671 LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
2672 REDUC_GROUP_SIZE (vinfo_for_stmt (first)) = size;
2674 return true;
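/* Illustrative example of a reduction chain that vect_is_slp_reduction
   recognizes (the source shape is an assumption for illustration):

     int sum = 0;
     for (int i = 0; i < n; i++)
       {
         sum += a[4 * i + 0];
         sum += a[4 * i + 1];
         sum += a[4 * i + 2];
         sum += a[4 * i + 3];
       }

   Each addition feeds the next and the last result reaches the
   loop-header PHI, matching the a1 -> a2 -> ... -> a5 pattern in the
   comment above; the chain is recorded via REDUC_GROUP_FIRST_ELEMENT /
   REDUC_GROUP_NEXT_ELEMENT for later SLP analysis.  */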
2677 /* Return true if we need an in-order reduction for operation CODE
2678 on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
2679 overflow must wrap. */
2681 static bool
2682 needs_fold_left_reduction_p (tree type, tree_code code,
2683 bool need_wrapping_integral_overflow)
2685 /* CHECKME: check for !flag_finite_math_only too? */
2686 if (SCALAR_FLOAT_TYPE_P (type))
2687 switch (code)
2689 case MIN_EXPR:
2690 case MAX_EXPR:
2691 return false;
2693 default:
2694 return !flag_associative_math;
2697 if (INTEGRAL_TYPE_P (type))
2699 if (!operation_no_trapping_overflow (type, code))
2700 return true;
2701 if (need_wrapping_integral_overflow
2702 && !TYPE_OVERFLOW_WRAPS (type)
2703 && operation_can_overflow (code))
2704 return true;
2705 return false;
2708 if (SAT_FIXED_POINT_TYPE_P (type))
2709 return true;
2711 return false;
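/* Illustrative example (compiler flags assumed): for

     double s = 0.0;
     for (int i = 0; i < n; i++)
       s += a[i];

   compiled without -ffast-math, flag_associative_math is false, so the
   function above returns true and the reduction must be evaluated in
   the original left-to-right order (FOLD_LEFT_REDUCTION) to preserve
   rounding; with -ffast-math the same loop can use an ordinary tree
   reduction of vector partial sums.  */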
2714 /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
2715 reduction operation CODE has a handled computation expression. */
2717 bool
2718 check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
2719 tree loop_arg, enum tree_code code)
2721 auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
2722 auto_bitmap visited;
2723 tree lookfor = PHI_RESULT (phi);
2724 ssa_op_iter curri;
2725 use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
2726 while (USE_FROM_PTR (curr) != loop_arg)
2727 curr = op_iter_next_use (&curri);
2728 curri.i = curri.numops;
2731 path.safe_push (std::make_pair (curri, curr));
2732 tree use = USE_FROM_PTR (curr);
2733 if (use == lookfor)
2734 break;
2735 gimple *def = SSA_NAME_DEF_STMT (use);
2736 if (gimple_nop_p (def)
2737 || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
2739 pop:
2742 std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
2743 curri = x.first;
2744 curr = x.second;
2746 curr = op_iter_next_use (&curri);
2747 /* Skip already visited or non-SSA operands (from iterating
2748 over PHI args). */
2749 while (curr != NULL_USE_OPERAND_P
2750 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2751 || ! bitmap_set_bit (visited,
2752 SSA_NAME_VERSION
2753 (USE_FROM_PTR (curr)))));
2755 while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
2756 if (curr == NULL_USE_OPERAND_P)
2757 break;
2759 else
2761 if (gimple_code (def) == GIMPLE_PHI)
2762 curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
2763 else
2764 curr = op_iter_init_use (&curri, def, SSA_OP_USE);
2765 while (curr != NULL_USE_OPERAND_P
2766 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
2767 || ! bitmap_set_bit (visited,
2768 SSA_NAME_VERSION
2769 (USE_FROM_PTR (curr)))))
2770 curr = op_iter_next_use (&curri);
2771 if (curr == NULL_USE_OPERAND_P)
2772 goto pop;
2775 while (1);
2776 if (dump_file && (dump_flags & TDF_DETAILS))
2778 dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
2779 unsigned i;
2780 std::pair<ssa_op_iter, use_operand_p> *x;
2781 FOR_EACH_VEC_ELT (path, i, x)
2783 dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
2784 dump_printf (MSG_NOTE, " ");
2786 dump_printf (MSG_NOTE, "\n");
2789 /* Check whether the reduction path detected is valid. */
2790 bool fail = path.length () == 0;
2791 bool neg = false;
2792 for (unsigned i = 1; i < path.length (); ++i)
2794 gimple *use_stmt = USE_STMT (path[i].second);
2795 tree op = USE_FROM_PTR (path[i].second);
2796 if (! has_single_use (op)
2797 || ! is_gimple_assign (use_stmt))
2799 fail = true;
2800 break;
2802 if (gimple_assign_rhs_code (use_stmt) != code)
2804 if (code == PLUS_EXPR
2805 && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2807 /* Track whether we negate the reduction value each iteration. */
2808 if (gimple_assign_rhs2 (use_stmt) == op)
2809 neg = ! neg;
2811 else
2813 fail = true;
2814 break;
2818 return ! fail && ! neg;
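/* Illustrative example (assumed GIMPLE shape): for a PLUS_EXPR reduction
   the path

     s_1 = PHI <s_0, s_3>;
     s_2 = s_1 - a[i];
     s_3 = s_2 + b[i];

   is accepted: the MINUS_EXPR subtracts from the accumulator, so "neg"
   stays false.  A body like "s_3 = a[i] - s_1" negates the accumulator
   every iteration, leaving "neg" set, and the path is rejected.  */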
2822 /* Function vect_is_simple_reduction
2824 (1) Detect a cross-iteration def-use cycle that represents a simple
2825 reduction computation. We look for the following pattern:
2827 loop_header:
2828 a1 = phi < a0, a2 >
2829 a3 = ...
2830 a2 = operation (a3, a1)
2834 a3 = ...
2835 loop_header:
2836 a1 = phi < a0, a2 >
2837 a2 = operation (a3, a1)
2839 such that:
2840 1. operation is commutative and associative and it is safe to
2841 change the order of the computation
2842 2. no uses for a2 in the loop (a2 is used out of the loop)
2843 3. no uses of a1 in the loop besides the reduction operation
2844 4. no uses of a1 outside the loop.
2846 Conditions 1,4 are tested here.
2847 Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
2849 (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
2850 nested cycles.
2852 (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
2853 reductions:
2855 a1 = phi < a0, a2 >
2856 inner loop (def of a3)
2857 a2 = phi < a3 >
2859 (4) Detect condition expressions, i.e.:
2860 for (int i = 0; i < N; i++)
2861 if (a[i] < val)
2862 ret_val = a[i];
2866 static stmt_vec_info
2867 vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
2868 bool *double_reduc,
2869 bool need_wrapping_integral_overflow,
2870 enum vect_reduction_type *v_reduc_type)
2872 gphi *phi = as_a <gphi *> (phi_info->stmt);
2873 struct loop *loop = (gimple_bb (phi))->loop_father;
2874 struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
2875 gimple *phi_use_stmt = NULL;
2876 enum tree_code orig_code, code;
2877 tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
2878 tree type;
2879 int nloop_uses;
2880 tree name;
2881 imm_use_iterator imm_iter;
2882 use_operand_p use_p;
2883 bool phi_def;
2885 *double_reduc = false;
2886 *v_reduc_type = TREE_CODE_REDUCTION;
2888 tree phi_name = PHI_RESULT (phi);
2889 /* ??? If there are no uses of the PHI result the inner loop reduction
2890 won't be detected as possibly double-reduction by vectorizable_reduction
2891 because that tries to walk the PHI arg from the preheader edge which
2892 can be constant. See PR60382. */
2893 if (has_zero_uses (phi_name))
2894 return NULL;
2895 nloop_uses = 0;
2896 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
2898 gimple *use_stmt = USE_STMT (use_p);
2899 if (is_gimple_debug (use_stmt))
2900 continue;
2902 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2904 if (dump_enabled_p ())
2905 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2906 "intermediate value used outside loop.\n");
2908 return NULL;
2911 nloop_uses++;
2912 if (nloop_uses > 1)
2914 if (dump_enabled_p ())
2915 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2916 "reduction value used in loop.\n");
2917 return NULL;
2920 phi_use_stmt = use_stmt;
2923 edge latch_e = loop_latch_edge (loop);
2924 tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
2925 if (TREE_CODE (loop_arg) != SSA_NAME)
2927 if (dump_enabled_p ())
2929 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2930 "reduction: not ssa_name: ");
2931 dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
2932 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2934 return NULL;
2937 stmt_vec_info def_stmt_info = loop_info->lookup_def (loop_arg);
2938 if (!def_stmt_info)
2939 return NULL;
2941 if (gassign *def_stmt = dyn_cast <gassign *> (def_stmt_info->stmt))
2943 name = gimple_assign_lhs (def_stmt);
2944 phi_def = false;
2946 else if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
2948 name = PHI_RESULT (def_stmt);
2949 phi_def = true;
2951 else
2953 if (dump_enabled_p ())
2955 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2956 "reduction: unhandled reduction operation: ");
2957 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
2958 def_stmt_info->stmt, 0);
2960 return NULL;
2963 nloop_uses = 0;
2964 auto_vec<gphi *, 3> lcphis;
2965 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
2967 gimple *use_stmt = USE_STMT (use_p);
2968 if (is_gimple_debug (use_stmt))
2969 continue;
2970 if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2971 nloop_uses++;
2972 else
2973 /* We can have more than one loop-closed PHI. */
2974 lcphis.safe_push (as_a <gphi *> (use_stmt));
2975 if (nloop_uses > 1)
2977 if (dump_enabled_p ())
2978 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2979 "reduction used in loop.\n");
2980 return NULL;
2984 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2985 defined in the inner loop. */
2986 if (phi_def)
2988 gphi *def_stmt = as_a <gphi *> (def_stmt_info->stmt);
2989 op1 = PHI_ARG_DEF (def_stmt, 0);
2991 if (gimple_phi_num_args (def_stmt) != 1
2992 || TREE_CODE (op1) != SSA_NAME)
2994 if (dump_enabled_p ())
2995 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2996 "unsupported phi node definition.\n");
2998 return NULL;
3001 gimple *def1 = SSA_NAME_DEF_STMT (op1);
3002 if (gimple_bb (def1)
3003 && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
3004 && loop->inner
3005 && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
3006 && is_gimple_assign (def1)
3007 && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
3009 if (dump_enabled_p ())
3010 report_vect_op (MSG_NOTE, def_stmt,
3011 "detected double reduction: ");
3013 *double_reduc = true;
3014 return def_stmt_info;
3017 return NULL;
3020 /* If we are vectorizing an inner reduction, we execute it in the
3021 original order only if we are not dealing with a double
3022 reduction. */
3023 bool check_reduction = true;
3024 if (flow_loop_nested_p (vect_loop, loop))
3026 gphi *lcphi;
3027 unsigned i;
3028 check_reduction = false;
3029 FOR_EACH_VEC_ELT (lcphis, i, lcphi)
3030 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
3032 gimple *use_stmt = USE_STMT (use_p);
3033 if (is_gimple_debug (use_stmt))
3034 continue;
3035 if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
3036 check_reduction = true;
3040 gassign *def_stmt = as_a <gassign *> (def_stmt_info->stmt);
3041 bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
3042 code = orig_code = gimple_assign_rhs_code (def_stmt);
3044 /* We can handle "res -= x[i]", which is non-associative, by simply
3045 rewriting it as "res += -x[i]". Avoid changing the gimple
3046 instruction during the first simple tests and only do this if we're
3047 allowed to change the code at all. */
3048 if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
3049 code = PLUS_EXPR;
3051 if (code == COND_EXPR)
3053 if (! nested_in_vect_loop)
3054 *v_reduc_type = COND_REDUCTION;
3056 op3 = gimple_assign_rhs1 (def_stmt);
3057 if (COMPARISON_CLASS_P (op3))
3059 op4 = TREE_OPERAND (op3, 1);
3060 op3 = TREE_OPERAND (op3, 0);
3062 if (op3 == phi_name || op4 == phi_name)
3064 if (dump_enabled_p ())
3065 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3066 "reduction: condition depends on previous"
3067 " iteration: ");
3068 return NULL;
3071 op1 = gimple_assign_rhs2 (def_stmt);
3072 op2 = gimple_assign_rhs3 (def_stmt);
3074 else if (!commutative_tree_code (code) || !associative_tree_code (code))
3076 if (dump_enabled_p ())
3077 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3078 "reduction: not commutative/associative: ");
3079 return NULL;
3081 else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
3083 op1 = gimple_assign_rhs1 (def_stmt);
3084 op2 = gimple_assign_rhs2 (def_stmt);
3086 else
3088 if (dump_enabled_p ())
3089 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3090 "reduction: not handled operation: ");
3091 return NULL;
3094 if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
3096 if (dump_enabled_p ())
3097 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3098 "reduction: both uses not ssa_names: ");
3100 return NULL;
3103 type = TREE_TYPE (gimple_assign_lhs (def_stmt));
3104 if ((TREE_CODE (op1) == SSA_NAME
3105 && !types_compatible_p (type,TREE_TYPE (op1)))
3106 || (TREE_CODE (op2) == SSA_NAME
3107 && !types_compatible_p (type, TREE_TYPE (op2)))
3108 || (op3 && TREE_CODE (op3) == SSA_NAME
3109 && !types_compatible_p (type, TREE_TYPE (op3)))
3110 || (op4 && TREE_CODE (op4) == SSA_NAME
3111 && !types_compatible_p (type, TREE_TYPE (op4))))
3113 if (dump_enabled_p ())
3115 dump_printf_loc (MSG_NOTE, vect_location,
3116 "reduction: multiple types: operation type: ");
3117 dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
3118 dump_printf (MSG_NOTE, ", operands types: ");
3119 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3120 TREE_TYPE (op1));
3121 dump_printf (MSG_NOTE, ",");
3122 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3123 TREE_TYPE (op2));
3124 if (op3)
3126 dump_printf (MSG_NOTE, ",");
3127 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3128 TREE_TYPE (op3));
3131 if (op4)
3133 dump_printf (MSG_NOTE, ",");
3134 dump_generic_expr (MSG_NOTE, TDF_SLIM,
3135 TREE_TYPE (op4));
3137 dump_printf (MSG_NOTE, "\n");
3140 return NULL;
3143 /* Check whether it's ok to change the order of the computation.
3144 Generally, when vectorizing a reduction we change the order of the
3145 computation. This may change the behavior of the program in some
3146 cases, so we need to check that this is ok. One exception is when
3147 vectorizing an outer-loop: the inner-loop is executed sequentially,
3148 and therefore vectorizing reductions in the inner-loop during
3149 outer-loop vectorization is safe. */
3150 if (check_reduction
3151 && *v_reduc_type == TREE_CODE_REDUCTION
3152 && needs_fold_left_reduction_p (type, code,
3153 need_wrapping_integral_overflow))
3154 *v_reduc_type = FOLD_LEFT_REDUCTION;
3156 /* Reduction is safe. We're dealing with one of the following:
3157 1) integer arithmetic and no trapv
3158 2) floating point arithmetic, and special flags permit this optimization
3159 3) nested cycle (i.e., outer loop vectorization). */
3160 stmt_vec_info def1_info = loop_info->lookup_def (op1);
3161 stmt_vec_info def2_info = loop_info->lookup_def (op2);
3162 if (code != COND_EXPR && !def1_info && !def2_info)
3164 if (dump_enabled_p ())
3165 report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
3166 return NULL;
3169 /* Check that one def is the reduction def, defined by PHI,
3170 the other def is either defined in the loop ("vect_internal_def"),
3171 or it's an induction (defined by a loop-header phi-node). */
3173 if (def2_info
3174 && def2_info->stmt == phi
3175 && (code == COND_EXPR
3176 || !def1_info
3177 || vect_valid_reduction_input_p (def1_info)))
3179 if (dump_enabled_p ())
3180 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3181 return def_stmt_info;
3184 if (def1_info
3185 && def1_info->stmt == phi
3186 && (code == COND_EXPR
3187 || !def2_info
3188 || vect_valid_reduction_input_p (def2_info)))
3190 if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
3192 /* Check if we can swap operands (just for simplicity - so that
3193 the rest of the code can assume that the reduction variable
3194 is always the last (second) argument). */
3195 if (code == COND_EXPR)
3197 /* Swap cond_expr by inverting the condition. */
3198 tree cond_expr = gimple_assign_rhs1 (def_stmt);
3199 enum tree_code invert_code = ERROR_MARK;
3200 enum tree_code cond_code = TREE_CODE (cond_expr);
3202 if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
3204 bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
3205 invert_code = invert_tree_comparison (cond_code, honor_nans);
3207 if (invert_code != ERROR_MARK)
3209 TREE_SET_CODE (cond_expr, invert_code);
3210 swap_ssa_operands (def_stmt,
3211 gimple_assign_rhs2_ptr (def_stmt),
3212 gimple_assign_rhs3_ptr (def_stmt));
3214 else
3216 if (dump_enabled_p ())
3217 report_vect_op (MSG_NOTE, def_stmt,
3218 "detected reduction: cannot swap operands "
3219 "for cond_expr");
3220 return NULL;
3223 else
3224 swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
3225 gimple_assign_rhs2_ptr (def_stmt));
3227 if (dump_enabled_p ())
3228 report_vect_op (MSG_NOTE, def_stmt,
3229 "detected reduction: need to swap operands: ");
3231 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
3232 LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
3234 else
3236 if (dump_enabled_p ())
3237 report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
3240 return def_stmt_info;
3243 /* Try to find SLP reduction chain. */
3244 if (! nested_in_vect_loop
3245 && code != COND_EXPR
3246 && orig_code != MINUS_EXPR
3247 && vect_is_slp_reduction (loop_info, phi, def_stmt))
3249 if (dump_enabled_p ())
3250 report_vect_op (MSG_NOTE, def_stmt,
3251 "reduction: detected reduction chain: ");
3253 return def_stmt_info;
3256 /* Dissolve any group possibly half-built by vect_is_slp_reduction. */
3257 gimple *first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
3258 while (first)
3260 gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
3261 REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
3262 REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
3263 first = next;
3266 /* Look for the expression computing loop_arg from loop PHI result. */
3267 if (check_reduction_path (vect_location, loop, phi, loop_arg, code))
3268 return def_stmt_info;
3270 if (dump_enabled_p ())
3272 report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
3273 "reduction: unknown pattern: ");
3276 return NULL;
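/* Illustrative example (source shape assumed) of case (3) above, a
   double reduction across an outer and an inner loop:

     int s = 0;
     for (int i = 0; i < n; i++)
       for (int j = 0; j < m; j++)
         s += a[i][j];

   When vectorizing the outer loop, the latch definition of the outer
   reduction PHI is itself a PHI (the loop-closed PHI of the inner
   loop), which is what the phi_def path above detects and reports with
   *double_reduc set to true.  */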
3279 /* Wrapper around vect_is_simple_reduction, which will modify code
3280 in-place if it enables detection of more reductions. Arguments
3281 as there. */
3283 stmt_vec_info
3284 vect_force_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
3285 bool *double_reduc,
3286 bool need_wrapping_integral_overflow)
3288 enum vect_reduction_type v_reduc_type;
3289 stmt_vec_info def_info
3290 = vect_is_simple_reduction (loop_info, phi_info, double_reduc,
3291 need_wrapping_integral_overflow,
3292 &v_reduc_type);
3293 if (def_info)
3295 STMT_VINFO_REDUC_TYPE (phi_info) = v_reduc_type;
3296 STMT_VINFO_REDUC_DEF (phi_info) = def_info;
3297 STMT_VINFO_REDUC_TYPE (def_info) = v_reduc_type;
3298 STMT_VINFO_REDUC_DEF (def_info) = phi_info;
3300 return def_info;
3303 /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
3305 vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
3306 int *peel_iters_epilogue,
3307 stmt_vector_for_cost *scalar_cost_vec,
3308 stmt_vector_for_cost *prologue_cost_vec,
3309 stmt_vector_for_cost *epilogue_cost_vec)
3311 int retval = 0;
3312 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3314 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
3316 *peel_iters_epilogue = assumed_vf / 2;
3317 if (dump_enabled_p ())
3318 dump_printf_loc (MSG_NOTE, vect_location,
3319 "cost model: epilogue peel iters set to vf/2 "
3320 "because loop iterations are unknown .\n");
3322 /* If peeled iterations are known but the number of scalar loop
3323 iterations is unknown, count a taken branch per peeled loop. */
3324 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3325 NULL, 0, vect_prologue);
3326 retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
3327 NULL, 0, vect_epilogue);
3329 else
3331 int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
3332 peel_iters_prologue = niters < peel_iters_prologue ?
3333 niters : peel_iters_prologue;
3334 *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
3335 /* If we need to peel for gaps, but no peeling is required, we have to
3336 peel VF iterations. */
3337 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
3338 *peel_iters_epilogue = assumed_vf;
3341 stmt_info_for_cost *si;
3342 int j;
3343 if (peel_iters_prologue)
3344 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3346 stmt_vec_info stmt_info
3347 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
3348 retval += record_stmt_cost (prologue_cost_vec,
3349 si->count * peel_iters_prologue,
3350 si->kind, stmt_info, si->misalign,
3351 vect_prologue);
3353 if (*peel_iters_epilogue)
3354 FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
3356 stmt_vec_info stmt_info
3357 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
3358 retval += record_stmt_cost (epilogue_cost_vec,
3359 si->count * *peel_iters_epilogue,
3360 si->kind, stmt_info, si->misalign,
3361 vect_epilogue);
3364 return retval;
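/* Worked example with assumed numbers (illustration only): with a known
   iteration count of 103, an assumed vectorization factor of 8 and a
   prologue peel of 3 iterations, the epilogue peel count computed above
   is (103 - 3) % 8 == 4, and each scalar statement cost is charged 3
   times to PROLOGUE_COST_VEC and 4 times to EPILOGUE_COST_VEC.  */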
3367 /* Function vect_estimate_min_profitable_iters
3369 Return the number of iterations required for the vector version of the
3370 loop to be profitable relative to the cost of the scalar version of the
3371 loop.
3373 *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
3374 of iterations for vectorization. A value of -1 means loop
3375 vectorization is not profitable. This returned value may be used for
3376 a dynamic profitability check.
3378 *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
3379 for static check against estimated number of iterations. */
3381 static void
3382 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
3383 int *ret_min_profitable_niters,
3384 int *ret_min_profitable_estimate)
3386 int min_profitable_iters;
3387 int min_profitable_estimate;
3388 int peel_iters_prologue;
3389 int peel_iters_epilogue;
3390 unsigned vec_inside_cost = 0;
3391 int vec_outside_cost = 0;
3392 unsigned vec_prologue_cost = 0;
3393 unsigned vec_epilogue_cost = 0;
3394 int scalar_single_iter_cost = 0;
3395 int scalar_outside_cost = 0;
3396 int assumed_vf = vect_vf_for_cost (loop_vinfo);
3397 int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
3398 void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3400 /* Cost model disabled. */
3401 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
3403 dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
3404 *ret_min_profitable_niters = 0;
3405 *ret_min_profitable_estimate = 0;
3406 return;
3409 /* Requires loop versioning tests to handle misalignment. */
3410 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3412 /* FIXME: Make cost depend on complexity of individual check. */
3413 unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
3414 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3415 vect_prologue);
3416 dump_printf (MSG_NOTE,
3417 "cost model: Adding cost of checks for loop "
3418 "versioning to treat misalignment.\n");
3421 /* Requires loop versioning with alias checks. */
3422 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
3424 /* FIXME: Make cost depend on complexity of individual check. */
3425 unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
3426 (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
3427 vect_prologue);
3428 len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
3429 if (len)
3430 /* Count LEN - 1 ANDs and LEN comparisons. */
3431 (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
3432 NULL, 0, vect_prologue);
3433 len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
3434 if (len)
3436 /* Count LEN - 1 ANDs and LEN comparisons. */
3437 unsigned int nstmts = len * 2 - 1;
3438 /* +1 for each bias that needs adding. */
3439 for (unsigned int i = 0; i < len; ++i)
3440 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
3441 nstmts += 1;
3442 (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
3443 NULL, 0, vect_prologue);
3445 dump_printf (MSG_NOTE,
3446 "cost model: Adding cost of checks for loop "
3447 "versioning aliasing.\n");
3450 /* Requires loop versioning with niter checks. */
3451 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
3453 /* FIXME: Make cost depend on complexity of individual check. */
3454 (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
3455 vect_prologue);
3456 dump_printf (MSG_NOTE,
3457 "cost model: Adding cost of checks for loop "
3458 "versioning niters.\n");
3461 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3462 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
3463 vect_prologue);
3465 /* Count statements in scalar loop. Using this as scalar cost for a single
3466 iteration for now.
3468 TODO: Add outer loop support.
3470 TODO: Consider assigning different costs to different scalar
3471 statements. */
3473 scalar_single_iter_cost
3474 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
3476 /* Add additional cost for the peeled instructions in prologue and epilogue
3477 loop. (For fully-masked loops there will be no peeling.)
3479 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3480 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3482 TODO: Build an expression that represents peel_iters for prologue and
3483 epilogue to be used in a run-time test. */
3485 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
3487 peel_iters_prologue = 0;
3488 peel_iters_epilogue = 0;
3490 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
3492 /* We need to peel exactly one iteration. */
3493 peel_iters_epilogue += 1;
3494 stmt_info_for_cost *si;
3495 int j;
3496 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
3497 j, si)
3499 struct _stmt_vec_info *stmt_info
3500 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
3501 (void) add_stmt_cost (target_cost_data, si->count,
3502 si->kind, stmt_info, si->misalign,
3503 vect_epilogue);
3507 else if (npeel < 0)
3509 peel_iters_prologue = assumed_vf / 2;
3510 dump_printf (MSG_NOTE, "cost model: "
3511 "prologue peel iters set to vf/2.\n");
3513 /* If peeling for alignment is unknown, the loop bound of the main
3514 loop becomes unknown. */
3515 peel_iters_epilogue = assumed_vf / 2;
3516 dump_printf (MSG_NOTE, "cost model: "
3517 "epilogue peel iters set to vf/2 because "
3518 "peeling for alignment is unknown.\n");
3520 /* If peeled iterations are unknown, count a taken branch and a not taken
3521 branch per peeled loop. Even if scalar loop iterations are known,
3522 vector iterations are not known since peeled prologue iterations are
3523 not known. Hence guards remain the same. */
3524 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3525 NULL, 0, vect_prologue);
3526 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3527 NULL, 0, vect_prologue);
3528 (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
3529 NULL, 0, vect_epilogue);
3530 (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
3531 NULL, 0, vect_epilogue);
3532 stmt_info_for_cost *si;
3533 int j;
3534 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
3536 struct _stmt_vec_info *stmt_info
3537 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
3538 (void) add_stmt_cost (target_cost_data,
3539 si->count * peel_iters_prologue,
3540 si->kind, stmt_info, si->misalign,
3541 vect_prologue);
3542 (void) add_stmt_cost (target_cost_data,
3543 si->count * peel_iters_epilogue,
3544 si->kind, stmt_info, si->misalign,
3545 vect_epilogue);
3548 else
3550 stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
3551 stmt_info_for_cost *si;
3552 int j;
3553 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
3555 prologue_cost_vec.create (2);
3556 epilogue_cost_vec.create (2);
3557 peel_iters_prologue = npeel;
3559 (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
3560 &peel_iters_epilogue,
3561 &LOOP_VINFO_SCALAR_ITERATION_COST
3562 (loop_vinfo),
3563 &prologue_cost_vec,
3564 &epilogue_cost_vec);
3566 FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
3568 struct _stmt_vec_info *stmt_info
3569 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
3570 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
3571 si->misalign, vect_prologue);
3574 FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
3576 struct _stmt_vec_info *stmt_info
3577 = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
3578 (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
3579 si->misalign, vect_epilogue);
3582 prologue_cost_vec.release ();
3583 epilogue_cost_vec.release ();
3586 /* FORNOW: The scalar outside cost is incremented in one of the
3587 following ways:
3589 1. The vectorizer checks for alignment and aliasing and generates
3590 a condition that allows dynamic vectorization. A cost model
3591 check is ANDED with the versioning condition. Hence scalar code
3592 path now has the added cost of the versioning check.
3594 if (cost > th & versioning_check)
3595 jmp to vector code
3597 Hence run-time scalar is incremented by not-taken branch cost.
3599 2. The vectorizer then checks if a prologue is required. If the
3600 cost model check was not done before during versioning, it has to
3601 be done before the prologue check.
3603 if (cost <= th)
3604 prologue = scalar_iters
3605 if (prologue == 0)
3606 jmp to vector code
3607 else
3608 execute prologue
3609 if (prologue == num_iters)
3610 go to exit
3612 Hence the run-time scalar cost is incremented by a taken branch,
3613 plus a not-taken branch, plus a taken branch cost.
3615 3. The vectorizer then checks if an epilogue is required. If the
3616 cost model check was not done before during prologue check, it
3617 has to be done with the epilogue check.
3619 if (prologue == 0)
3620 jmp to vector code
3621 else
3622 execute prologue
3623 if (prologue == num_iters)
3624 go to exit
3625 vector code:
3626 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
3627 jmp to epilogue
3629 Hence the run-time scalar cost should be incremented by 2 taken
3630 branches.
3632 TODO: The back end may reorder the BBs differently and reverse
3633 conditions/branch directions. Change the estimates below to
3634 something more reasonable. */
3636 /* If the number of iterations is known and we do not do versioning, we can
3637 decide whether to vectorize at compile time. Hence the scalar version
3638 does not carry cost model guard costs. */
3639 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
3640 || LOOP_REQUIRES_VERSIONING (loop_vinfo))
3642 /* Cost model check occurs at versioning. */
3643 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
3644 scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
3645 else
3647 /* Cost model check occurs at prologue generation. */
3648 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
3649 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
3650 + vect_get_stmt_cost (cond_branch_not_taken);
3651 /* Cost model check occurs at epilogue generation. */
3652 else
3653 scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
3657 /* Complete the target-specific cost calculations. */
3658 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
3659 &vec_inside_cost, &vec_epilogue_cost);
3661 vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
3663 if (dump_enabled_p ())
3665 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
3666 dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
3667 vec_inside_cost);
3668 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
3669 vec_prologue_cost);
3670 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
3671 vec_epilogue_cost);
3672 dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
3673 scalar_single_iter_cost);
3674 dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
3675 scalar_outside_cost);
3676 dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
3677 vec_outside_cost);
3678 dump_printf (MSG_NOTE, " prologue iterations: %d\n",
3679 peel_iters_prologue);
3680 dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
3681 peel_iters_epilogue);
3684 /* Calculate number of iterations required to make the vector version
3685 profitable, relative to the loop bodies only. The following condition
3686 must hold true:
3687 SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
3688 where
3689 SIC = scalar iteration cost, VIC = vector iteration cost,
3690 VOC = vector outside cost, VF = vectorization factor,
3691 PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
3692 SOC = scalar outside cost for run time cost model check. */
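/* Worked example with made-up costs (purely illustrative, not from any
   target): SIC = 4, VIC = 6, VOC = 20, SOC = 6, VF = 4 and no peeling.
   The code below computes ((20 - 6) * 4) / (4 * 4 - 6) = 56 / 10 = 5,
   and the correction step bumps this to 6 because 4 * 4 * 5 = 80 is
   still <= 6 * 5 + (20 - 6) * 4 = 86.  */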
3694 if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
3696 min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
3697 * assumed_vf
3698 - vec_inside_cost * peel_iters_prologue
3699 - vec_inside_cost * peel_iters_epilogue);
3700 if (min_profitable_iters <= 0)
3701 min_profitable_iters = 0;
3702 else
3704 min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
3705 - vec_inside_cost);
3707 if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
3708 <= (((int) vec_inside_cost * min_profitable_iters)
3709 + (((int) vec_outside_cost - scalar_outside_cost)
3710 * assumed_vf)))
3711 min_profitable_iters++;
3714 /* vector version will never be profitable. */
3715 else
3717 if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
3718 warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
3719 "vectorization did not happen for a simd loop");
3721 if (dump_enabled_p ())
3722 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3723 "cost model: the vector iteration cost = %d "
3724 "divided by the scalar iteration cost = %d "
3725 "is greater than or equal to the vectorization factor = %d"
3726 ".\n",
3727 vec_inside_cost, scalar_single_iter_cost, assumed_vf);
3728 *ret_min_profitable_niters = -1;
3729 *ret_min_profitable_estimate = -1;
3730 return;
3733 dump_printf (MSG_NOTE,
3734 " Calculated minimum iters for profitability: %d\n",
3735 min_profitable_iters);
3737 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
3738 && min_profitable_iters < (assumed_vf + peel_iters_prologue))
3739 /* We want the vectorized loop to execute at least once. */
3740 min_profitable_iters = assumed_vf + peel_iters_prologue;
3742 if (dump_enabled_p ())
3743 dump_printf_loc (MSG_NOTE, vect_location,
3744 " Runtime profitability threshold = %d\n",
3745 min_profitable_iters);
3747 *ret_min_profitable_niters = min_profitable_iters;
3749 /* Calculate number of iterations required to make the vector version
3750 profitable, relative to the loop bodies only.
3752 The non-vectorized variant costs SIC * niters and must win over the
3753 vector variant for the expected loop trip count, i.e. the following condition must hold:
3754 SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
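/* With the same made-up numbers as above (SIC = 4, VIC = 6, VOC = 20,
   SOC = 6, VF = 4, no peeling) this gives
   ((20 + 6) * 4) / (4 * 4 - 6) = 104 / 10 = 10, which is then raised
   below to at least MIN_PROFITABLE_ITERS.  */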
3756 if (vec_outside_cost <= 0)
3757 min_profitable_estimate = 0;
3758 else
3760 min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
3761 * assumed_vf
3762 - vec_inside_cost * peel_iters_prologue
3763 - vec_inside_cost * peel_iters_epilogue)
3764 / ((scalar_single_iter_cost * assumed_vf)
3765 - vec_inside_cost);
3767 min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
3768 if (dump_enabled_p ())
3769 dump_printf_loc (MSG_NOTE, vect_location,
3770 " Static estimate profitability threshold = %d\n",
3771 min_profitable_estimate);
3773 *ret_min_profitable_estimate = min_profitable_estimate;
3776 /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
3777 vector elements (not bits) for a vector with NELT elements. */
3778 static void
3779 calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
3780 vec_perm_builder *sel)
3782 /* The encoding is a single stepped pattern. Any wrap-around is handled
3783 by vec_perm_indices. */
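/* For example (illustrative only): OFFSET = 2 and NELT = 8 encode the
   stepped pattern {2, 3, 4}, standing for the full selector
   {2, 3, ..., 9}; selector values >= NELT pick elements from the second
   vec_perm operand, which the shift-based reduction code passes as a
   zero vector.  */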
3784 sel->new_vector (nelt, 1, 3);
3785 for (unsigned int i = 0; i < 3; i++)
3786 sel->quick_push (i + offset);
3789 /* Checks whether the target supports whole-vector shifts for vectors of mode
3790 MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
3791 it supports vec_perm_const with masks for all necessary shift amounts. */
3792 static bool
3793 have_whole_vector_shift (machine_mode mode)
3795 if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
3796 return true;
3798 /* Variable-length vectors should be handled via the optab. */
3799 unsigned int nelt;
3800 if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
3801 return false;
3803 vec_perm_builder sel;
3804 vec_perm_indices indices;
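/* Check every shift amount a whole-vector shift reduction would need:
   nelt/2, nelt/4, ..., 1 elements (e.g. 4, 2 and 1 for an 8-element
   vector).  */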
3805 for (unsigned int i = nelt / 2; i >= 1; i /= 2)
3807 calc_vec_perm_mask_for_shift (i, nelt, &sel);
3808 indices.new_vector (sel, 2, nelt);
3809 if (!can_vec_perm_const_p (mode, indices, false))
3810 return false;
3812 return true;
3815 /* TODO: Close dependency between vect_model_*_cost and vectorizable_*
3816 functions. Design better to avoid maintenance issues. */
3818 /* Function vect_model_reduction_cost.
3820 Models cost for a reduction operation, including the vector ops
3821 generated within the strip-mine loop, the initial definition before
3822 the loop, and the epilogue code that must be generated. */
3824 static void
3825 vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
3826 int ncopies, stmt_vector_for_cost *cost_vec)
3828 int prologue_cost = 0, epilogue_cost = 0, inside_cost;
3829 enum tree_code code;
3830 optab optab;
3831 tree vectype;
3832 machine_mode mode;
3833 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
3834 struct loop *loop = NULL;
3836 if (loop_vinfo)
3837 loop = LOOP_VINFO_LOOP (loop_vinfo);
3839 /* Condition reductions generate two reductions in the loop. */
3840 vect_reduction_type reduction_type
3841 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
3842 if (reduction_type == COND_REDUCTION)
3843 ncopies *= 2;
3845 vectype = STMT_VINFO_VECTYPE (stmt_info);
3846 mode = TYPE_MODE (vectype);
3847 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
3849 if (!orig_stmt_info)
3850 orig_stmt_info = stmt_info;
3852 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
3854 if (reduction_type == EXTRACT_LAST_REDUCTION
3855 || reduction_type == FOLD_LEFT_REDUCTION)
3857 /* No extra instructions needed in the prologue. */
3858 prologue_cost = 0;
3860 if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
3861 /* Count one reduction-like operation per vector. */
3862 inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
3863 stmt_info, 0, vect_body);
3864 else
3866 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3867 unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
3868 inside_cost = record_stmt_cost (cost_vec, nelements,
3869 vec_to_scalar, stmt_info, 0,
3870 vect_body);
3871 inside_cost += record_stmt_cost (cost_vec, nelements,
3872 scalar_stmt, stmt_info, 0,
3873 vect_body);
3876 else
3878 /* Add in cost for initial definition.
3879 For cond reduction we have four vectors: initial index, step,
3880 initial result of the data reduction, initial value of the index
3881 reduction. */
3882 int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
3883 prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
3884 scalar_to_vec, stmt_info, 0,
3885 vect_prologue);
3887 /* Cost of reduction op inside loop. */
3888 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
3889 stmt_info, 0, vect_body);
3892 /* Determine cost of epilogue code.
3894 We have a reduction operator that will reduce the vector in one statement.
3895 Also requires scalar extract. */
3897 if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
3899 if (reduc_fn != IFN_LAST)
3901 if (reduction_type == COND_REDUCTION)
3903 /* An EQ stmt and a COND_EXPR stmt. */
3904 epilogue_cost += record_stmt_cost (cost_vec, 2,
3905 vector_stmt, stmt_info, 0,
3906 vect_epilogue);
3907 /* Reduction of the max index and a reduction of the found
3908 values. */
3909 epilogue_cost += record_stmt_cost (cost_vec, 2,
3910 vec_to_scalar, stmt_info, 0,
3911 vect_epilogue);
3912 /* A broadcast of the max value. */
3913 epilogue_cost += record_stmt_cost (cost_vec, 1,
3914 scalar_to_vec, stmt_info, 0,
3915 vect_epilogue);
3917 else
3919 epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
3920 stmt_info, 0, vect_epilogue);
3921 epilogue_cost += record_stmt_cost (cost_vec, 1,
3922 vec_to_scalar, stmt_info, 0,
3923 vect_epilogue);
3926 else if (reduction_type == COND_REDUCTION)
3928 unsigned estimated_nunits = vect_nunits_for_cost (vectype);
3929 /* Extraction of scalar elements. */
3930 epilogue_cost += record_stmt_cost (cost_vec,
3931 2 * estimated_nunits,
3932 vec_to_scalar, stmt_info, 0,
3933 vect_epilogue);
3934 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3935 epilogue_cost += record_stmt_cost (cost_vec,
3936 2 * estimated_nunits - 3,
3937 scalar_stmt, stmt_info, 0,
3938 vect_epilogue);
3940 else if (reduction_type == EXTRACT_LAST_REDUCTION
3941 || reduction_type == FOLD_LEFT_REDUCTION)
3942 /* No extra instructions needed in the epilogue. */
3944 else
3946 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
3947 tree bitsize =
3948 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
3949 int element_bitsize = tree_to_uhwi (bitsize);
3950 int nelements = vec_size_in_bits / element_bitsize;
3952 if (code == COND_EXPR)
3953 code = MAX_EXPR;
3955 optab = optab_for_tree_code (code, vectype, optab_default);
3957 /* We have a whole vector shift available. */
3958 if (optab != unknown_optab
3959 && VECTOR_MODE_P (mode)
3960 && optab_handler (optab, mode) != CODE_FOR_nothing
3961 && have_whole_vector_shift (mode))
3963 /* Final reduction via vector shifts and the reduction operator.
3964 Also requires scalar extract. */
3965 epilogue_cost += record_stmt_cost (cost_vec,
3966 exact_log2 (nelements) * 2,
3967 vector_stmt, stmt_info, 0,
3968 vect_epilogue);
3969 epilogue_cost += record_stmt_cost (cost_vec, 1,
3970 vec_to_scalar, stmt_info, 0,
3971 vect_epilogue);
3973 else
3974 /* Use extracts and reduction op for final reduction. For N
3975 elements, we have N extracts and N-1 reduction ops. */
3976 epilogue_cost += record_stmt_cost (cost_vec,
3977 nelements + nelements - 1,
3978 vector_stmt, stmt_info, 0,
3979 vect_epilogue);
3983 if (dump_enabled_p ())
3984 dump_printf (MSG_NOTE,
3985 "vect_model_reduction_cost: inside_cost = %d, "
3986 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
3987 prologue_cost, epilogue_cost);
3991 /* Function vect_model_induction_cost.
3993 Models cost for induction operations. */
3995 static void
3996 vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
3997 stmt_vector_for_cost *cost_vec)
3999 unsigned inside_cost, prologue_cost;
4001 if (PURE_SLP_STMT (stmt_info))
4002 return;
4004 /* loop cost for vec_loop. */
4005 inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
4006 stmt_info, 0, vect_body);
4008 /* prologue cost for vec_init and vec_step. */
4009 prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
4010 stmt_info, 0, vect_prologue);
4012 if (dump_enabled_p ())
4013 dump_printf_loc (MSG_NOTE, vect_location,
4014 "vect_model_induction_cost: inside_cost = %d, "
4015 "prologue_cost = %d .\n", inside_cost, prologue_cost);
4020 /* Function get_initial_def_for_reduction
4022 Input:
4023 STMT - a stmt that performs a reduction operation in the loop.
4024 INIT_VAL - the initial value of the reduction variable
4026 Output:
4027 ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
4028 of the reduction (used for adjusting the epilog - see below).
4029 Return a vector variable, initialized according to the operation that STMT
4030 performs. This vector will be used as the initial value of the
4031 vector of partial results.
4033 Option1 (adjust in epilog): Initialize the vector as follows:
4034 add/bit or/xor: [0,0,...,0,0]
4035 mult/bit and: [1,1,...,1,1]
4036 min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
4037 and when necessary (e.g. add/mult case) let the caller know
4038 that it needs to adjust the result by init_val.
4040 Option2: Initialize the vector as follows:
4041 add/bit or/xor: [init_val,0,0,...,0]
4042 mult/bit and: [init_val,1,1,...,1]
4043 min/max/cond_expr: [init_val,init_val,...,init_val]
4044 and no adjustments are needed.
4046 For example, for the following code:
4048 s = init_val;
4049 for (i=0;i<n;i++)
4050 s = s + a[i];
4052 STMT is 's = s + a[i]', and the reduction variable is 's'.
4053 For a vector of 4 units, we want to return either [0,0,0,init_val],
4054 or [0,0,0,0] and let the caller know that it needs to adjust
4055 the result at the end by 'init_val'.
4057 FORNOW, we use Option1 (the 'adjust in epilog' scheme) when ADJUSTMENT_DEF
4058 is not NULL, because its initialization vector is simpler (the same element
4059 in all entries), and Option2 otherwise.
4061 A cost model should help decide between these two schemes. */
4063 tree
4064 get_initial_def_for_reduction (gimple *stmt, tree init_val,
4065 tree *adjustment_def)
4067 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
4068 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
4069 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4070 tree scalar_type = TREE_TYPE (init_val);
4071 tree vectype = get_vectype_for_scalar_type (scalar_type);
4072 enum tree_code code = gimple_assign_rhs_code (stmt);
4073 tree def_for_init;
4074 tree init_def;
4075 REAL_VALUE_TYPE real_init_val = dconst0;
4076 int int_init_val = 0;
4077 gimple_seq stmts = NULL;
4079 gcc_assert (vectype);
4081 gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
4082 || SCALAR_FLOAT_TYPE_P (scalar_type));
4084 gcc_assert (nested_in_vect_loop_p (loop, stmt)
4085 || loop == (gimple_bb (stmt))->loop_father);
4087 vect_reduction_type reduction_type
4088 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
4090 switch (code)
4092 case WIDEN_SUM_EXPR:
4093 case DOT_PROD_EXPR:
4094 case SAD_EXPR:
4095 case PLUS_EXPR:
4096 case MINUS_EXPR:
4097 case BIT_IOR_EXPR:
4098 case BIT_XOR_EXPR:
4099 case MULT_EXPR:
4100 case BIT_AND_EXPR:
4102 /* ADJUSTMENT_DEF is NULL when called from
4103 vect_create_epilog_for_reduction to vectorize double reduction. */
4104 if (adjustment_def)
4105 *adjustment_def = init_val;
4107 if (code == MULT_EXPR)
4109 real_init_val = dconst1;
4110 int_init_val = 1;
4113 if (code == BIT_AND_EXPR)
4114 int_init_val = -1;
4116 if (SCALAR_FLOAT_TYPE_P (scalar_type))
4117 def_for_init = build_real (scalar_type, real_init_val);
4118 else
4119 def_for_init = build_int_cst (scalar_type, int_init_val);
4121 if (adjustment_def)
4122 /* Option1: the first element is '0' or '1' as well. */
4123 init_def = gimple_build_vector_from_val (&stmts, vectype,
4124 def_for_init);
4125 else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
4127 /* Option2 (variable length): the first element is INIT_VAL. */
4128 init_def = gimple_build_vector_from_val (&stmts, vectype,
4129 def_for_init);
4130 init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
4131 vectype, init_def, init_val);
4133 else
4135 /* Option2: the first element is INIT_VAL. */
4136 tree_vector_builder elts (vectype, 1, 2);
4137 elts.quick_push (init_val);
4138 elts.quick_push (def_for_init);
4139 init_def = gimple_build_vector (&stmts, &elts);
4142 break;
4144 case MIN_EXPR:
4145 case MAX_EXPR:
4146 case COND_EXPR:
4148 if (adjustment_def)
4150 *adjustment_def = NULL_TREE;
4151 if (reduction_type != COND_REDUCTION
4152 && reduction_type != EXTRACT_LAST_REDUCTION)
4154 init_def = vect_get_vec_def_for_operand (init_val, stmt);
4155 break;
4158 init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
4159 init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
4161 break;
4163 default:
4164 gcc_unreachable ();
4167 if (stmts)
4168 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
4169 return init_def;
4172 /* Get at the initial defs for the reduction PHIs in SLP_NODE.
4173 NUMBER_OF_VECTORS is the number of vector defs to create.
4174 If NEUTRAL_OP is nonnull, introducing extra elements of that
4175 value will not change the result. */
4177 static void
4178 get_initial_defs_for_reduction (slp_tree slp_node,
4179 vec<tree> *vec_oprnds,
4180 unsigned int number_of_vectors,
4181 bool reduc_chain, tree neutral_op)
4183 vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
4184 stmt_vec_info stmt_vinfo = stmts[0];
4185 unsigned HOST_WIDE_INT nunits;
4186 unsigned j, number_of_places_left_in_vector;
4187 tree vector_type;
4188 tree vop;
4189 int group_size = stmts.length ();
4190 unsigned int vec_num, i;
4191 unsigned number_of_copies = 1;
4192 vec<tree> voprnds;
4193 voprnds.create (number_of_vectors);
4194 struct loop *loop;
4195 auto_vec<tree, 16> permute_results;
4197 vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
4199 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
4201 loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
4202 gcc_assert (loop);
4203 edge pe = loop_preheader_edge (loop);
4205 gcc_assert (!reduc_chain || neutral_op);
4207 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
4208 created vectors. It is greater than 1 if unrolling is performed.
4210 For example, we have two scalar operands, s1 and s2 (e.g., group of
4211 strided accesses of size two), while NUNITS is four (i.e., four scalars
4212 of this type can be packed in a vector). The output vector will contain
4213 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
4214 will be 2).
4216 If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
4217 vectors containing the operands.
4219 For example, NUNITS is four as before, and the group size is 8
4220 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
4221 {s5, s6, s7, s8}. */
4223 if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
4224 nunits = group_size;
4226 number_of_copies = nunits * number_of_vectors / group_size;
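/* E.g. (hypothetical numbers): group_size = 2, nunits = 4 and
   number_of_vectors = 1 give number_of_copies = 2, matching the
   {s1, s2, s1, s2} example above.  */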
4228 number_of_places_left_in_vector = nunits;
4229 bool constant_p = true;
4230 tree_vector_builder elts (vector_type, nunits, 1);
4231 elts.quick_grow (nunits);
4232 for (j = 0; j < number_of_copies; j++)
4234 for (i = group_size - 1; stmts.iterate (i, &stmt_vinfo); i--)
4236 tree op;
4237 /* Get the def before the loop. In reduction chain we have only
4238 one initial value. */
4239 if ((j != (number_of_copies - 1)
4240 || (reduc_chain && i != 0))
4241 && neutral_op)
4242 op = neutral_op;
4243 else
4244 op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
4246 /* Create 'vect_ = {op0,op1,...,opn}'. */
4247 number_of_places_left_in_vector--;
4248 elts[number_of_places_left_in_vector] = op;
4249 if (!CONSTANT_CLASS_P (op))
4250 constant_p = false;
4252 if (number_of_places_left_in_vector == 0)
4254 gimple_seq ctor_seq = NULL;
4255 tree init;
4256 if (constant_p && !neutral_op
4257 ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
4258 : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
4259 /* Build the vector directly from ELTS. */
4260 init = gimple_build_vector (&ctor_seq, &elts);
4261 else if (neutral_op)
4263 /* Build a vector of the neutral value and shift the
4264 other elements into place. */
4265 init = gimple_build_vector_from_val (&ctor_seq, vector_type,
4266 neutral_op);
4267 int k = nunits;
4268 while (k > 0 && elts[k - 1] == neutral_op)
4269 k -= 1;
4270 while (k > 0)
4272 k -= 1;
4273 init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
4274 vector_type, init, elts[k]);
4277 else
4279 /* First time round, duplicate ELTS to fill the
4280 required number of vectors, then cherry pick the
4281 appropriate result for each iteration. */
4282 if (vec_oprnds->is_empty ())
4283 duplicate_and_interleave (&ctor_seq, vector_type, elts,
4284 number_of_vectors,
4285 permute_results);
4286 init = permute_results[number_of_vectors - j - 1];
4288 if (ctor_seq != NULL)
4289 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4290 voprnds.quick_push (init);
4292 number_of_places_left_in_vector = nunits;
4293 elts.new_vector (vector_type, nunits, 1);
4294 elts.quick_grow (nunits);
4295 constant_p = true;
4300 /* Since the vectors are created in the reverse order, we should invert
4301 them. */
4302 vec_num = voprnds.length ();
4303 for (j = vec_num; j != 0; j--)
4305 vop = voprnds[j - 1];
4306 vec_oprnds->quick_push (vop);
4309 voprnds.release ();
4311 /* In case that VF is greater than the unrolling factor needed for the SLP
4312 group of stmts, NUMBER_OF_VECTORS to be created is greater than
4313 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
4314 to replicate the vectors. */
4315 tree neutral_vec = NULL;
4316 while (number_of_vectors > vec_oprnds->length ())
4318 if (neutral_op)
4320 if (!neutral_vec)
4322 gimple_seq ctor_seq = NULL;
4323 neutral_vec = gimple_build_vector_from_val
4324 (&ctor_seq, vector_type, neutral_op);
4325 if (ctor_seq != NULL)
4326 gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
4328 vec_oprnds->quick_push (neutral_vec);
4330 else
4332 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
4333 vec_oprnds->quick_push (vop);
4339 /* Function vect_create_epilog_for_reduction
4341 Create code at the loop-epilog to finalize the result of a reduction
4342 computation.
4344 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
4345 reduction statements.
4346 STMT is the scalar reduction stmt that is being vectorized.
4347 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
4348 number of elements that we can fit in a vectype (nunits). In this case
4349 we have to generate more than one vector stmt - i.e - we need to "unroll"
4350 the vector stmt by a factor VF/nunits. For more details see documentation
4351 in vectorizable_operation.
4352 REDUC_FN is the internal function for the epilog reduction.
4353 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
4354 computation.
4355 REDUC_INDEX is the index of the operand in the right hand side of the
4356 statement that is defined by REDUCTION_PHI.
4357 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
4358 SLP_NODE is an SLP node containing a group of reduction statements. The
4359 first one in this group is STMT.
4360 INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
4361 when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
4362 be smaller than any value of the IV in the loop, for MIN_EXPR larger than
4363 any value of the IV in the loop.
4364 INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
4365 NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
4366 null if this is not an SLP reduction
4368 This function:
4369 1. Creates the reduction def-use cycles: sets the arguments for
4370 REDUCTION_PHIS:
4371 The loop-entry argument is the vectorized initial-value of the reduction.
4372 The loop-latch argument is taken from VECT_DEFS - the vector of partial
4373 sums.
4374 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
4375 by calling the function specified by REDUC_FN if available, or by
4376 other means (whole-vector shifts or a scalar loop).
4377 The function also creates a new phi node at the loop exit to preserve
4378 loop-closed form, as illustrated below.
4380 The flow at the entry to this function:
4382 loop:
4383 vec_def = phi <null, null> # REDUCTION_PHI
4384 VECT_DEF = vector_stmt # vectorized form of STMT
4385 s_loop = scalar_stmt # (scalar) STMT
4386 loop_exit:
4387 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4388 use <s_out0>
4389 use <s_out0>
4391 The above is transformed by this function into:
4393 loop:
4394 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4395 VECT_DEF = vector_stmt # vectorized form of STMT
4396 s_loop = scalar_stmt # (scalar) STMT
4397 loop_exit:
4398 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
4399 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4400 v_out2 = reduce <v_out1>
4401 s_out3 = extract_field <v_out2, 0>
4402 s_out4 = adjust_result <s_out3>
4403 use <s_out4>
4404 use <s_out4>
4407 static void
4408 vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
4409 gimple *reduc_def_stmt,
4410 int ncopies, internal_fn reduc_fn,
4411 vec<stmt_vec_info> reduction_phis,
4412 bool double_reduc,
4413 slp_tree slp_node,
4414 slp_instance slp_node_instance,
4415 tree induc_val, enum tree_code induc_code,
4416 tree neutral_op)
4418 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4419 stmt_vec_info prev_phi_info;
4420 tree vectype;
4421 machine_mode mode;
4422 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
4423 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
4424 basic_block exit_bb;
4425 tree scalar_dest;
4426 tree scalar_type;
4427 gimple *new_phi = NULL, *phi;
4428 stmt_vec_info phi_info;
4429 gimple_stmt_iterator exit_gsi;
4430 tree vec_dest;
4431 tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
4432 gimple *epilog_stmt = NULL;
4433 enum tree_code code = gimple_assign_rhs_code (stmt);
4434 gimple *exit_phi;
4435 tree bitsize;
4436 tree adjustment_def = NULL;
4437 tree vec_initial_def = NULL;
4438 tree expr, def, initial_def = NULL;
4439 tree orig_name, scalar_result;
4440 imm_use_iterator imm_iter, phi_imm_iter;
4441 use_operand_p use_p, phi_use_p;
4442 gimple *use_stmt;
4443 stmt_vec_info reduction_phi_info = NULL;
4444 bool nested_in_vect_loop = false;
4445 auto_vec<gimple *> new_phis;
4446 auto_vec<stmt_vec_info> inner_phis;
4447 enum vect_def_type dt = vect_unknown_def_type;
4448 int j, i;
4449 auto_vec<tree> scalar_results;
4450 unsigned int group_size = 1, k, ratio;
4451 auto_vec<tree> vec_initial_defs;
4452 auto_vec<gimple *> phis;
4453 bool slp_reduc = false;
4454 bool direct_slp_reduc;
4455 tree new_phi_result;
4456 stmt_vec_info inner_phi = NULL;
4457 tree induction_index = NULL_TREE;
4459 if (slp_node)
4460 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
4462 if (nested_in_vect_loop_p (loop, stmt))
4464 outer_loop = loop;
4465 loop = loop->inner;
4466 nested_in_vect_loop = true;
4467 gcc_assert (!slp_node);
4470 vectype = STMT_VINFO_VECTYPE (stmt_info);
4471 gcc_assert (vectype);
4472 mode = TYPE_MODE (vectype);
4474 /* 1. Create the reduction def-use cycle:
4475 Set the arguments of REDUCTION_PHIS, i.e., transform
4477 loop:
4478 vec_def = phi <null, null> # REDUCTION_PHI
4479 VECT_DEF = vector_stmt # vectorized form of STMT
4482 into:
4484 loop:
4485 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
4486 VECT_DEF = vector_stmt # vectorized form of STMT
4489 (in case of SLP, do it for all the phis). */
4491 /* Get the loop-entry arguments. */
4492 enum vect_def_type initial_def_dt = vect_unknown_def_type;
4493 if (slp_node)
4495 unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
4496 vec_initial_defs.reserve (vec_num);
4497 get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
4498 &vec_initial_defs, vec_num,
4499 REDUC_GROUP_FIRST_ELEMENT (stmt_info),
4500 neutral_op);
4502 else
4504 /* Get at the scalar def before the loop, that defines the initial value
4505 of the reduction variable. */
4506 initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
4507 loop_preheader_edge (loop));
4508 /* Optimize: if initial_def is for REDUC_MAX smaller than the base
4509 and we can't use zero for induc_val, use initial_def. Similarly
4510 for REDUC_MIN and initial_def larger than the base. */
4511 if (TREE_CODE (initial_def) == INTEGER_CST
4512 && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4513 == INTEGER_INDUC_COND_REDUCTION)
4514 && !integer_zerop (induc_val)
4515 && ((induc_code == MAX_EXPR
4516 && tree_int_cst_lt (initial_def, induc_val))
4517 || (induc_code == MIN_EXPR
4518 && tree_int_cst_lt (induc_val, initial_def))))
4519 induc_val = initial_def;
4521 if (double_reduc)
4522 /* In case of double reduction we only create a vector variable
4523 to be put in the reduction phi node. The actual statement
4524 creation is done later in this function. */
4525 vec_initial_def = vect_create_destination_var (initial_def, vectype);
4526 else if (nested_in_vect_loop)
4528 /* Do not use an adjustment def as that case is not supported
4529 correctly if ncopies is not one. */
4530 vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
4531 vec_initial_def = vect_get_vec_def_for_operand (initial_def, stmt);
4533 else
4534 vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
4535 &adjustment_def);
4536 vec_initial_defs.create (1);
4537 vec_initial_defs.quick_push (vec_initial_def);
4540 /* Set phi nodes arguments. */
4541 FOR_EACH_VEC_ELT (reduction_phis, i, phi_info)
4543 tree vec_init_def = vec_initial_defs[i];
4544 tree def = vect_defs[i];
4545 for (j = 0; j < ncopies; j++)
4547 if (j != 0)
4549 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4550 if (nested_in_vect_loop)
4551 vec_init_def
4552 = vect_get_vec_def_for_stmt_copy (initial_def_dt,
4553 vec_init_def);
4556 /* Set the loop-entry arg of the reduction-phi. */
4558 gphi *phi = as_a <gphi *> (phi_info->stmt);
4559 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
4560 == INTEGER_INDUC_COND_REDUCTION)
4562 /* Initialise the reduction phi to zero. This prevents non-zero
4563 initial values from interfering with the reduction op. */
4564 gcc_assert (ncopies == 1);
4565 gcc_assert (i == 0);
4567 tree vec_init_def_type = TREE_TYPE (vec_init_def);
4568 tree induc_val_vec
4569 = build_vector_from_val (vec_init_def_type, induc_val);
4571 add_phi_arg (phi, induc_val_vec, loop_preheader_edge (loop),
4572 UNKNOWN_LOCATION);
4574 else
4575 add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
4576 UNKNOWN_LOCATION);
4578 /* Set the loop-latch arg for the reduction-phi. */
4579 if (j > 0)
4580 def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
4582 add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);
4584 if (dump_enabled_p ())
4586 dump_printf_loc (MSG_NOTE, vect_location,
4587 "transform reduction: created def-use cycle: ");
4588 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
4589 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
4594 /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
4595 which is updated with the current index of the loop for every match of
4596 the original loop's cond_expr (VEC_STMT). This results in a vector
4597 containing the last time the condition passed for that vector lane.
4598 The first match will be a 1 to allow 0 to be used for non-matching
4599 indexes. If there are no matches at all then the vector will be all
4600 zeroes. */
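/* Illustrative example (made-up values): with a four-lane index vector,
   a lane whose condition last held in the seventh scalar iteration ends
   the loop holding the value 7, while a lane that never matched still
   holds 0.  The epilogue code below then only needs to find the lane
   with the maximum value.  */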
4601 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
4603 tree indx_before_incr, indx_after_incr;
4604 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
4606 gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info)->stmt;
4607 gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
4609 int scalar_precision
4610 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
4611 tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
4612 tree cr_index_vector_type = build_vector_type
4613 (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
4615 /* First we create a simple vector induction variable which starts
4616 with the values {1,2,3,...} (SERIES_VECT) and increments by the
4617 vector size (STEP). */
4619 /* Create a {1,2,3,...} vector. */
4620 tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
4622 /* Create a vector of the step value. */
4623 tree step = build_int_cst (cr_index_scalar_type, nunits_out);
4624 tree vec_step = build_vector_from_val (cr_index_vector_type, step);
4626 /* Create an induction variable. */
4627 gimple_stmt_iterator incr_gsi;
4628 bool insert_after;
4629 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
4630 create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
4631 insert_after, &indx_before_incr, &indx_after_incr);
4633 /* Next create a new phi node vector (NEW_PHI_TREE) which starts
4634 filled with zeros (VEC_ZERO). */
4636 /* Create a vector of 0s. */
4637 tree zero = build_zero_cst (cr_index_scalar_type);
4638 tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
4640 /* Create a vector phi node. */
4641 tree new_phi_tree = make_ssa_name (cr_index_vector_type);
4642 new_phi = create_phi_node (new_phi_tree, loop->header);
4643 loop_vinfo->add_stmt (new_phi);
4644 add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
4645 loop_preheader_edge (loop), UNKNOWN_LOCATION);
4647 /* Now take the condition from the loop's original cond_expr
4648 (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
4649 every match uses values from the induction variable
4650 (INDEX_BEFORE_INCR) otherwise uses values from the phi node
4651 (NEW_PHI_TREE).
4652 Finally, we update the phi (NEW_PHI_TREE) to take the value of
4653 the new cond_expr (INDEX_COND_EXPR). */
4655 /* Duplicate the condition from vec_stmt. */
4656 tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
4658 /* Create a conditional, where the condition is taken from vec_stmt
4659 (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
4660 else is the phi (NEW_PHI_TREE). */
4661 tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
4662 ccompare, indx_before_incr,
4663 new_phi_tree);
4664 induction_index = make_ssa_name (cr_index_vector_type);
4665 gimple *index_condition = gimple_build_assign (induction_index,
4666 index_cond_expr);
4667 gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
4668 stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
4669 STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
4671 /* Update the phi with the vec cond. */
4672 add_phi_arg (as_a <gphi *> (new_phi), induction_index,
4673 loop_latch_edge (loop), UNKNOWN_LOCATION);
4676 /* 2. Create epilog code.
4677 The reduction epilog code operates across the elements of the vector
4678 of partial results computed by the vectorized loop.
4679 The reduction epilog code consists of:
4681 step 1: compute the scalar result in a vector (v_out2)
4682 step 2: extract the scalar result (s_out3) from the vector (v_out2)
4683 step 3: adjust the scalar result (s_out3) if needed.
4685 Step 1 can be accomplished using one of the following three schemes:
4686 (scheme 1) using reduc_fn, if available.
4687 (scheme 2) using whole-vector shifts, if available.
4688 (scheme 3) using a scalar loop. In this case steps 1+2 above are
4689 combined.
4691 The overall epilog code looks like this:
4693 s_out0 = phi <s_loop> # original EXIT_PHI
4694 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
4695 v_out2 = reduce <v_out1> # step 1
4696 s_out3 = extract_field <v_out2, 0> # step 2
4697 s_out4 = adjust_result <s_out3> # step 3
4699 (step 3 is optional, and steps 1 and 2 may be combined).
4700 Lastly, the uses of s_out0 are replaced by s_out4. */
4703 /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
4704 v_out1 = phi <VECT_DEF>
4705 Store them in NEW_PHIS. */
4707 exit_bb = single_exit (loop)->dest;
4708 prev_phi_info = NULL;
4709 new_phis.create (vect_defs.length ());
4710 FOR_EACH_VEC_ELT (vect_defs, i, def)
4712 for (j = 0; j < ncopies; j++)
4714 tree new_def = copy_ssa_name (def);
4715 phi = create_phi_node (new_def, exit_bb);
4716 stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
4717 if (j == 0)
4718 new_phis.quick_push (phi);
4719 else
4721 def = vect_get_vec_def_for_stmt_copy (dt, def);
4722 STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
4725 SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
4726 prev_phi_info = phi_info;
4730 /* The epilogue is created for the outer-loop, i.e., for the loop being
4731 vectorized. Create exit phis for the outer loop. */
4732 if (double_reduc)
4734 loop = outer_loop;
4735 exit_bb = single_exit (loop)->dest;
4736 inner_phis.create (vect_defs.length ());
4737 FOR_EACH_VEC_ELT (new_phis, i, phi)
4739 stmt_vec_info phi_info = loop_vinfo->lookup_stmt (phi);
4740 tree new_result = copy_ssa_name (PHI_RESULT (phi));
4741 gphi *outer_phi = create_phi_node (new_result, exit_bb);
4742 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4743 PHI_RESULT (phi));
4744 prev_phi_info = loop_vinfo->add_stmt (outer_phi);
4745 inner_phis.quick_push (phi_info);
4746 new_phis[i] = outer_phi;
4747 while (STMT_VINFO_RELATED_STMT (phi_info))
4749 phi_info = STMT_VINFO_RELATED_STMT (phi_info);
4750 new_result = copy_ssa_name (PHI_RESULT (phi_info->stmt));
4751 outer_phi = create_phi_node (new_result, exit_bb);
4752 SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
4753 PHI_RESULT (phi_info->stmt));
4754 stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
4755 STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
4756 prev_phi_info = outer_phi_info;
4761 exit_gsi = gsi_after_labels (exit_bb);
4763 /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
4764 (i.e. when reduc_fn is not available) and in the final adjustment
4765 code (if needed). Also get the original scalar reduction variable as
4766 defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
4767 represents a reduction pattern), the tree-code and scalar-def are
4768 taken from the original stmt that the pattern-stmt (STMT) replaces.
4769 Otherwise (it is a regular reduction) - the tree-code and scalar-def
4770 are taken from STMT. */
4772 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
4773 if (!orig_stmt_info)
4775 /* Regular reduction */
4776 orig_stmt_info = stmt_info;
4778 else
4780 /* Reduction pattern */
4781 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
4782 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
4785 code = gimple_assign_rhs_code (orig_stmt_info->stmt);
4786 /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
4787 partial results are added and not subtracted. */
4788 if (code == MINUS_EXPR)
4789 code = PLUS_EXPR;
4791 scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
4792 scalar_type = TREE_TYPE (scalar_dest);
4793 scalar_results.create (group_size);
4794 new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
4795 bitsize = TYPE_SIZE (scalar_type);
4797 /* In case this is a reduction in an inner-loop while vectorizing an outer
4798 loop - we don't need to extract a single scalar result at the end of the
4799 inner-loop (unless it is double reduction, i.e., the use of reduction is
4800 outside the outer-loop). The final vector of partial results will be used
4801 in the vectorized outer-loop, or reduced to a scalar result at the end of
4802 the outer-loop. */
4803 if (nested_in_vect_loop && !double_reduc)
4804 goto vect_finalize_reduction;
4806 /* SLP reduction without reduction chain, e.g.,
4807 # a1 = phi <a2, a0>
4808 # b1 = phi <b2, b0>
4809 a2 = operation (a1)
4810 b2 = operation (b1) */
4811 slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
4813 /* True if we should implement SLP_REDUC using native reduction operations
4814 instead of scalar operations. */
4815 direct_slp_reduc = (reduc_fn != IFN_LAST
4816 && slp_reduc
4817 && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
4819 /* In case of reduction chain, e.g.,
4820 # a1 = phi <a3, a0>
4821 a2 = operation (a1)
4822 a3 = operation (a2),
4824 we may end up with more than one vector result. Here we reduce them to
4825 one vector. */
4826 if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
4828 tree first_vect = PHI_RESULT (new_phis[0]);
4829 gassign *new_vec_stmt = NULL;
4830 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4831 for (k = 1; k < new_phis.length (); k++)
4833 gimple *next_phi = new_phis[k];
4834 tree second_vect = PHI_RESULT (next_phi);
4835 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4836 new_vec_stmt = gimple_build_assign (tem, code,
4837 first_vect, second_vect);
4838 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4839 first_vect = tem;
4842 new_phi_result = first_vect;
4843 if (new_vec_stmt)
4845 new_phis.truncate (0);
4846 new_phis.safe_push (new_vec_stmt);
4849 /* Likewise if we couldn't use a single def-use cycle. */
4850 else if (ncopies > 1)
4852 gcc_assert (new_phis.length () == 1);
4853 tree first_vect = PHI_RESULT (new_phis[0]);
4854 gassign *new_vec_stmt = NULL;
4855 vec_dest = vect_create_destination_var (scalar_dest, vectype);
4856 gimple *next_phi = new_phis[0];
4857 for (int k = 1; k < ncopies; ++k)
4859 next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi));
4860 tree second_vect = PHI_RESULT (next_phi);
4861 tree tem = make_ssa_name (vec_dest, new_vec_stmt);
4862 new_vec_stmt = gimple_build_assign (tem, code,
4863 first_vect, second_vect);
4864 gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
4865 first_vect = tem;
4867 new_phi_result = first_vect;
4868 new_phis.truncate (0);
4869 new_phis.safe_push (new_vec_stmt);
4871 else
4872 new_phi_result = PHI_RESULT (new_phis[0]);
4874 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4875 && reduc_fn != IFN_LAST)
4877 /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
4878 various data values where the condition matched and another vector
4879 (INDUCTION_INDEX) containing all the indexes of those matches. We
4880 need to extract the last matching index (which will be the index with
4881 highest value) and use this to index into the data vector.
4882 For the case where there were no matches, the data vector will contain
4883 all default values and the index vector will be all zeros. */
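/* Sketch of the steps below with made-up values: INDUCTION_INDEX =
   {0, 7, 3, 0} reduces to MAX_INDEX = 7; comparing against the
   broadcast max gives the mask {0, 1, 0, 0}; the VEC_COND keeps only
   the data value of lane 1 (zeros elsewhere); and the final unsigned
   MAX reduction extracts that single value.  */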
4885 /* Get various versions of the type of the vector of indexes. */
4886 tree index_vec_type = TREE_TYPE (induction_index);
4887 gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
4888 tree index_scalar_type = TREE_TYPE (index_vec_type);
4889 tree index_vec_cmp_type = build_same_sized_truth_vector_type
4890 (index_vec_type);
4892 /* Get an unsigned integer version of the type of the data vector. */
4893 int scalar_precision
4894 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
4895 tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
4896 tree vectype_unsigned = build_vector_type
4897 (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
4899 /* First we need to create a vector (ZERO_VEC) of zeros and another
4900 vector (MAX_INDEX_VEC) filled with the last matching index, which we
4901 can create using a MAX reduction and then expanding.
4902 In the case where the loop never made any matches, the max index will
4903 be zero. */
4905 /* Vector of {0, 0, 0,...}. */
4906 tree zero_vec = make_ssa_name (vectype);
4907 tree zero_vec_rhs = build_zero_cst (vectype);
4908 gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
4909 gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
4911 /* Find maximum value from the vector of found indexes. */
4912 tree max_index = make_ssa_name (index_scalar_type);
4913 gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4914 1, induction_index);
4915 gimple_call_set_lhs (max_index_stmt, max_index);
4916 gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
4918 /* Vector of {max_index, max_index, max_index,...}. */
4919 tree max_index_vec = make_ssa_name (index_vec_type);
4920 tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
4921 max_index);
4922 gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
4923 max_index_vec_rhs);
4924 gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
4926 /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
4927 with the vector (INDUCTION_INDEX) of found indexes, choosing values
4928 from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
4929 otherwise. Only one value should match, resulting in a vector
4930 (VEC_COND) with one data value and the rest zeros.
4931 In the case where the loop never made any matches, every index will
4932 match, resulting in a vector with all data values (which will all be
4933 the default value). */
4935 /* Compare the max index vector to the vector of found indexes to find
4936 the position of the max value. */
4937 tree vec_compare = make_ssa_name (index_vec_cmp_type);
4938 gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
4939 induction_index,
4940 max_index_vec);
4941 gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
4943 /* Use the compare to choose either values from the data vector or
4944 zero. */
4945 tree vec_cond = make_ssa_name (vectype);
4946 gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
4947 vec_compare, new_phi_result,
4948 zero_vec);
4949 gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
4951 /* Finally we need to extract the data value from the vector (VEC_COND)
4952 into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
4953 reduction, but because this doesn't exist, we can use a MAX reduction
4954 instead. The data value might be signed or a float so we need to cast
4955 it first.
4956 In the case where the loop never made any matches, the data values are
4957 all identical, and so will reduce down correctly. */
4959 /* Make the matched data values unsigned. */
4960 tree vec_cond_cast = make_ssa_name (vectype_unsigned);
4961 tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
4962 vec_cond);
4963 gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
4964 VIEW_CONVERT_EXPR,
4965 vec_cond_cast_rhs);
4966 gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
4968 /* Reduce down to a scalar value. */
4969 tree data_reduc = make_ssa_name (scalar_type_unsigned);
4970 gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
4971 1, vec_cond_cast);
4972 gimple_call_set_lhs (data_reduc_stmt, data_reduc);
4973 gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
4975 /* Convert the reduced value back to the result type and set as the
4976 result. */
4977 gimple_seq stmts = NULL;
4978 new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
4979 data_reduc);
4980 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
4981 scalar_results.safe_push (new_temp);
4983 else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
4984 && reduc_fn == IFN_LAST)
4986 /* Condition reduction without supported IFN_REDUC_MAX. Generate
4987 idx = 0;
4988 idx_val = induction_index[0];
4989 val = data_reduc[0];
4990 for (idx = 0, val = init, i = 0; i < nelts; ++i)
4991 if (induction_index[i] > idx_val)
4992 val = data_reduc[i], idx_val = induction_index[i];
4993 return val; */
4995 tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
4996 tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
4997 unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
4998 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
4999 /* Enforced by vectorizable_reduction, which ensures we have target
5000 support before allowing a conditional reduction on variable-length
5001 vectors. */
5002 unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
5003 tree idx_val = NULL_TREE, val = NULL_TREE;
5004 for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
5006 tree old_idx_val = idx_val;
5007 tree old_val = val;
5008 idx_val = make_ssa_name (idx_eltype);
5009 epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
5010 build3 (BIT_FIELD_REF, idx_eltype,
5011 induction_index,
5012 bitsize_int (el_size),
5013 bitsize_int (off)));
5014 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5015 val = make_ssa_name (data_eltype);
5016 epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
5017 build3 (BIT_FIELD_REF,
5018 data_eltype,
5019 new_phi_result,
5020 bitsize_int (el_size),
5021 bitsize_int (off)));
5022 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5023 if (off != 0)
5025 tree new_idx_val = idx_val;
5026 tree new_val = val;
5027 if (off != v_size - el_size)
5029 new_idx_val = make_ssa_name (idx_eltype);
5030 epilog_stmt = gimple_build_assign (new_idx_val,
5031 MAX_EXPR, idx_val,
5032 old_idx_val);
5033 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5035 new_val = make_ssa_name (data_eltype);
5036 epilog_stmt = gimple_build_assign (new_val,
5037 COND_EXPR,
5038 build2 (GT_EXPR,
5039 boolean_type_node,
5040 idx_val,
5041 old_idx_val),
5042 val, old_val);
5043 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5044 idx_val = new_idx_val;
5045 val = new_val;
5048 /* Convert the reduced value back to the result type and set as the
5049 result. */
5050 gimple_seq stmts = NULL;
5051 val = gimple_convert (&stmts, scalar_type, val);
5052 gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
5053 scalar_results.safe_push (val);
5056 /* 2.3 Create the reduction code, using one of the three schemes described
5057 above. In SLP we simply need to extract all the elements from the
5058 vector (without reducing them), so we use scalar shifts. */
5059 else if (reduc_fn != IFN_LAST && !slp_reduc)
5061 tree tmp;
5062 tree vec_elem_type;
5064 /* Case 1: Create:
5065 v_out2 = reduc_expr <v_out1> */
5067 if (dump_enabled_p ())
5068 dump_printf_loc (MSG_NOTE, vect_location,
5069 "Reduce using direct vector reduction.\n");
5071 vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
5072 if (!useless_type_conversion_p (scalar_type, vec_elem_type))
5074 tree tmp_dest
5075 = vect_create_destination_var (scalar_dest, vec_elem_type);
5076 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5077 new_phi_result);
5078 gimple_set_lhs (epilog_stmt, tmp_dest);
5079 new_temp = make_ssa_name (tmp_dest, epilog_stmt);
5080 gimple_set_lhs (epilog_stmt, new_temp);
5081 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5083 epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
5084 new_temp);
5086 else
5088 epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
5089 new_phi_result);
5090 gimple_set_lhs (epilog_stmt, new_scalar_dest);
5093 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5094 gimple_set_lhs (epilog_stmt, new_temp);
5095 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5097 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5098 == INTEGER_INDUC_COND_REDUCTION)
5099 && !operand_equal_p (initial_def, induc_val, 0))
5101 /* Earlier we set the initial value to be a vector of induc_val
5102 values. Check the result and if it is induc_val then replace
5103 with the original initial value, unless induc_val is
5104 the same as initial_def already. */
5105 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5106 induc_val);
5108 tmp = make_ssa_name (new_scalar_dest);
5109 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5110 initial_def, new_temp);
5111 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5112 new_temp = tmp;
5115 scalar_results.safe_push (new_temp);
5117 else if (direct_slp_reduc)
5119 /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
5120 with the elements for other SLP statements replaced with the
5121 neutral value. We can then do a normal reduction on each vector. */
5123 /* Enforced by vectorizable_reduction. */
5124 gcc_assert (new_phis.length () == 1);
5125 gcc_assert (pow2p_hwi (group_size));
5127 slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
5128 vec<stmt_vec_info> orig_phis
5129 = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
5130 gimple_seq seq = NULL;
5132 /* Build a vector {0, 1, 2, ...}, with the same number of elements
5133 and the same element size as VECTYPE. */
5134 tree index = build_index_vector (vectype, 0, 1);
5135 tree index_type = TREE_TYPE (index);
5136 tree index_elt_type = TREE_TYPE (index_type);
5137 tree mask_type = build_same_sized_truth_vector_type (index_type);
5139 /* Create a vector that, for each element, identifies which of
5140 the REDUC_GROUP_SIZE results should use it. */
5141 tree index_mask = build_int_cst (index_elt_type, group_size - 1);
5142 index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
5143 build_vector_from_val (index_type, index_mask));
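/* E.g. (illustrative): for group_size = 2 and eight elements the index
   vector {0, 1, 2, ..., 7} becomes {0, 1, 0, 1, 0, 1, 0, 1}, so the
   even elements feed the first SLP result and the odd elements the
   second.  */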
5145 /* Get a neutral vector value. This is simply a splat of the neutral
5146 scalar value if we have one, otherwise the initial scalar value
5147 is itself a neutral value. */
5148 tree vector_identity = NULL_TREE;
5149 if (neutral_op)
5150 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5151 neutral_op);
5152 for (unsigned int i = 0; i < group_size; ++i)
5154 /* If there's no universal neutral value, we can use the
5155 initial scalar value from the original PHI. This is used
5156 for MIN and MAX reduction, for example. */
5157 if (!neutral_op)
5159 tree scalar_value
5160 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
5161 loop_preheader_edge (loop));
5162 vector_identity = gimple_build_vector_from_val (&seq, vectype,
5163 scalar_value);
5166 /* Calculate the equivalent of:
5168 sel[j] = (index[j] == i);
5170 which selects the elements of NEW_PHI_RESULT that should
5171 be included in the result. */
5172 tree compare_val = build_int_cst (index_elt_type, i);
5173 compare_val = build_vector_from_val (index_type, compare_val);
5174 tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
5175 index, compare_val);
5177 /* Calculate the equivalent of:
5179 vec = sel ? new_phi_result : vector_identity;
5181 VEC is now suitable for a full vector reduction. */
5182 tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
5183 sel, new_phi_result, vector_identity);
5185 /* Do the reduction and convert it to the appropriate type. */
5186 tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
5187 TREE_TYPE (vectype), vec);
5188 scalar = gimple_convert (&seq, scalar_type, scalar);
5189 scalar_results.safe_push (scalar);
5191 gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
5193 else
5195 bool reduce_with_shift;
5196 tree vec_temp;
5198 /* COND reductions all do the final reduction with MAX_EXPR
5199 or MIN_EXPR. */
5200 if (code == COND_EXPR)
5202 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5203 == INTEGER_INDUC_COND_REDUCTION)
5204 code = induc_code;
5205 else
5206 code = MAX_EXPR;
5209 /* See if the target wants to do the final (shift) reduction
5210 in a vector mode of smaller size and first reduce upper/lower
5211 halves against each other. */
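/* E.g. on a hypothetical target with 512-bit vectors that prefers
   256-bit reductions, SZ starts at 64 bytes, SZ1 becomes 32, and the
   loop further below combines the upper and lower 256-bit halves once
   before the shift reduction.  */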
5212 enum machine_mode mode1 = mode;
5213 tree vectype1 = vectype;
5214 unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
5215 unsigned sz1 = sz;
5216 if (!slp_reduc
5217 && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
5218 sz1 = GET_MODE_SIZE (mode1).to_constant ();
5220 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
5221 reduce_with_shift = have_whole_vector_shift (mode1);
5222 if (!VECTOR_MODE_P (mode1))
5223 reduce_with_shift = false;
5224 else
5226 optab optab = optab_for_tree_code (code, vectype1, optab_default);
5227 if (optab_handler (optab, mode1) == CODE_FOR_nothing)
5228 reduce_with_shift = false;
5231 /* First reduce the vector to the size we want to do the shift
5232 reduction on, by repeatedly combining upper and lower halves. */
5233 new_temp = new_phi_result;
5234 while (sz > sz1)
5236 gcc_assert (!slp_reduc);
5237 sz /= 2;
5238 vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
5240 /* The target has to make sure we support lowpart/highpart
5241 extraction, either via direct vector extract or through
5242 an integer mode punning. */
5243 tree dst1, dst2;
5244 if (convert_optab_handler (vec_extract_optab,
5245 TYPE_MODE (TREE_TYPE (new_temp)),
5246 TYPE_MODE (vectype1))
5247 != CODE_FOR_nothing)
5249 /* Extract sub-vectors directly once vec_extract becomes
5250 a conversion optab. */
5251 dst1 = make_ssa_name (vectype1);
5252 epilog_stmt
5253 = gimple_build_assign (dst1, BIT_FIELD_REF,
5254 build3 (BIT_FIELD_REF, vectype1,
5255 new_temp, TYPE_SIZE (vectype1),
5256 bitsize_int (0)));
5257 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5258 dst2 = make_ssa_name (vectype1);
5259 epilog_stmt
5260 = gimple_build_assign (dst2, BIT_FIELD_REF,
5261 build3 (BIT_FIELD_REF, vectype1,
5262 new_temp, TYPE_SIZE (vectype1),
5263 bitsize_int (sz * BITS_PER_UNIT)));
5264 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5266 else
5268 /* Extract via punning to appropriately sized integer mode
5269 vector. */
5270 tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT, 1);
5272 tree etype = build_vector_type (eltype, 2);
5273 gcc_assert (convert_optab_handler (vec_extract_optab,
5274 TYPE_MODE (etype),
5275 TYPE_MODE (eltype))
5276 != CODE_FOR_nothing);
5277 tree tem = make_ssa_name (etype);
5278 epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
5279 build1 (VIEW_CONVERT_EXPR,
5280 etype, new_temp));
5281 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5282 new_temp = tem;
5283 tem = make_ssa_name (eltype);
5284 epilog_stmt
5285 = gimple_build_assign (tem, BIT_FIELD_REF,
5286 build3 (BIT_FIELD_REF, eltype,
5287 new_temp, TYPE_SIZE (eltype),
5288 bitsize_int (0)));
5289 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5290 dst1 = make_ssa_name (vectype1);
5291 epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
5292 build1 (VIEW_CONVERT_EXPR,
5293 vectype1, tem));
5294 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5295 tem = make_ssa_name (eltype);
5296 epilog_stmt
5297 = gimple_build_assign (tem, BIT_FIELD_REF,
5298 build3 (BIT_FIELD_REF, eltype,
5299 new_temp, TYPE_SIZE (eltype),
5300 bitsize_int (sz * BITS_PER_UNIT)));
5301 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5302 dst2 = make_ssa_name (vectype1);
5303 epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
5304 build1 (VIEW_CONVERT_EXPR,
5305 vectype1, tem));
5306 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5309 new_temp = make_ssa_name (vectype1);
5310 epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
5311 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
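/* At this point NEW_TEMP holds the partial results in a vector whose
   size matches VECTYPE1, the size chosen for the final reduction step.  */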
5314 if (reduce_with_shift && !slp_reduc)
5316 int element_bitsize = tree_to_uhwi (bitsize);
5317 /* Enforced by vectorizable_reduction, which disallows SLP reductions
5318 for variable-length vectors and also requires direct target support
5319 for loop reductions. */
5320 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5321 int nelements = vec_size_in_bits / element_bitsize;
5322 vec_perm_builder sel;
5323 vec_perm_indices indices;
5325 int elt_offset;
5327 tree zero_vec = build_zero_cst (vectype1);
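/* The zero vector only supplies the lanes shifted in by the VEC_PERM
   below; those lanes never feed into element 0, from which the final
   result is extracted.  */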
5328 /* Case 2: Create:
5329 for (offset = nelements/2; offset >= 1; offset/=2)
5330 {
5331 Create: va' = vec_shift <va, offset>
5332 Create: va = vop <va, va'>
5333 } */
5335 tree rhs;
5337 if (dump_enabled_p ())
5338 dump_printf_loc (MSG_NOTE, vect_location,
5339 "Reduce using vector shifts\n");
5341 mode1 = TYPE_MODE (vectype1);
5342 vec_dest = vect_create_destination_var (scalar_dest, vectype1);
5343 for (elt_offset = nelements / 2;
5344 elt_offset >= 1;
5345 elt_offset /= 2)
5347 calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
5348 indices.new_vector (sel, 2, nelements);
5349 tree mask = vect_gen_perm_mask_any (vectype1, indices);
5350 epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
5351 new_temp, zero_vec, mask);
5352 new_name = make_ssa_name (vec_dest, epilog_stmt);
5353 gimple_assign_set_lhs (epilog_stmt, new_name);
5354 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5356 epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
5357 new_temp);
5358 new_temp = make_ssa_name (vec_dest, epilog_stmt);
5359 gimple_assign_set_lhs (epilog_stmt, new_temp);
5360 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5363 /* 2.4 Extract the final scalar result. Create:
5364 s_out3 = extract_field <v_out2, bitpos> */
5366 if (dump_enabled_p ())
5367 dump_printf_loc (MSG_NOTE, vect_location,
5368 "extract scalar result\n");
5370 rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
5371 bitsize, bitsize_zero_node);
5372 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5373 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5374 gimple_assign_set_lhs (epilog_stmt, new_temp);
5375 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5376 scalar_results.safe_push (new_temp);
5378 else
5380 /* Case 3: Create:
5381 s = extract_field <v_out2, 0>
5382 for (offset = element_size;
5383 offset < vector_size;
5384 offset += element_size;)
5385 {
5386 Create: s' = extract_field <v_out2, offset>
5387 Create: s = op <s, s'> // For non SLP cases
5388 } */
5390 if (dump_enabled_p ())
5391 dump_printf_loc (MSG_NOTE, vect_location,
5392 "Reduce using scalar code.\n");
5394 int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
5395 int element_bitsize = tree_to_uhwi (bitsize);
5396 FOR_EACH_VEC_ELT (new_phis, i, new_phi)
5398 int bit_offset;
5399 if (gimple_code (new_phi) == GIMPLE_PHI)
5400 vec_temp = PHI_RESULT (new_phi);
5401 else
5402 vec_temp = gimple_assign_lhs (new_phi);
5403 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
5404 bitsize_zero_node);
5405 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5406 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5407 gimple_assign_set_lhs (epilog_stmt, new_temp);
5408 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5410 /* In SLP we don't need to apply the reduction operation, so we just
5411 collect s' values in SCALAR_RESULTS. */
5412 if (slp_reduc)
5413 scalar_results.safe_push (new_temp);
5415 for (bit_offset = element_bitsize;
5416 bit_offset < vec_size_in_bits;
5417 bit_offset += element_bitsize)
5419 tree bitpos = bitsize_int (bit_offset);
5420 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
5421 bitsize, bitpos);
5423 epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
5424 new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
5425 gimple_assign_set_lhs (epilog_stmt, new_name);
5426 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5428 if (slp_reduc)
5430 /* In SLP we don't need to apply the reduction operation, so
5431 we just collect s' values in SCALAR_RESULTS. */
5432 new_temp = new_name;
5433 scalar_results.safe_push (new_name);
5435 else
5437 epilog_stmt = gimple_build_assign (new_scalar_dest, code,
5438 new_name, new_temp);
5439 new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
5440 gimple_assign_set_lhs (epilog_stmt, new_temp);
5441 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5446 /* The only case in which we need to reduce scalar results in SLP is
5447 unrolling. If the size of SCALAR_RESULTS is greater than
5448 REDUC_GROUP_SIZE, we reduce them by combining elements modulo
5449 REDUC_GROUP_SIZE. */
5450 if (slp_reduc)
5452 tree res, first_res, new_res;
5453 gimple *new_stmt;
5455 /* Reduce multiple scalar results in case of SLP unrolling. */
5456 for (j = group_size; scalar_results.iterate (j, &res);
5457 j++)
5459 first_res = scalar_results[j % group_size];
5460 new_stmt = gimple_build_assign (new_scalar_dest, code,
5461 first_res, res);
5462 new_res = make_ssa_name (new_scalar_dest, new_stmt);
5463 gimple_assign_set_lhs (new_stmt, new_res);
5464 gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
5465 scalar_results[j % group_size] = new_res;
5468 else
5469 /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
5470 scalar_results.safe_push (new_temp);
5473 if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5474 == INTEGER_INDUC_COND_REDUCTION)
5475 && !operand_equal_p (initial_def, induc_val, 0))
5477 /* Earlier we set the initial value to be a vector of induc_val
5478 values. Check the result and if it is induc_val then replace
5479 it with the original initial value, unless induc_val is
5480 the same as initial_def already. */
5481 tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
5482 induc_val);
5484 tree tmp = make_ssa_name (new_scalar_dest);
5485 epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
5486 initial_def, new_temp);
5487 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5488 scalar_results[0] = tmp;
5492 vect_finalize_reduction:
5494 if (double_reduc)
5495 loop = loop->inner;
5497 /* 2.5 Adjust the final result by the initial value of the reduction
5498 variable. (When such adjustment is not needed, then
5499 'adjustment_def' is zero). For example, if code is PLUS we create:
5500 new_temp = loop_exit_def + adjustment_def */
5502 if (adjustment_def)
5504 gcc_assert (!slp_reduc);
5505 if (nested_in_vect_loop)
5507 new_phi = new_phis[0];
5508 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
5509 expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
5510 new_dest = vect_create_destination_var (scalar_dest, vectype);
5512 else
5514 new_temp = scalar_results[0];
5515 gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
5516 expr = build2 (code, scalar_type, new_temp, adjustment_def);
5517 new_dest = vect_create_destination_var (scalar_dest, scalar_type);
5520 epilog_stmt = gimple_build_assign (new_dest, expr);
5521 new_temp = make_ssa_name (new_dest, epilog_stmt);
5522 gimple_assign_set_lhs (epilog_stmt, new_temp);
5523 gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
5524 if (nested_in_vect_loop)
5526 stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
5527 STMT_VINFO_RELATED_STMT (epilog_stmt_info)
5528 = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
5530 if (!double_reduc)
5531 scalar_results.quick_push (new_temp);
5532 else
5533 scalar_results[0] = new_temp;
5535 else
5536 scalar_results[0] = new_temp;
5538 new_phis[0] = epilog_stmt;
5541 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5542 phis with new adjusted scalar results, i.e., replace use <s_out0>
5543 with use <s_out4>.
5545 Transform:
5546 loop_exit:
5547 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5548 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5549 v_out2 = reduce <v_out1>
5550 s_out3 = extract_field <v_out2, 0>
5551 s_out4 = adjust_result <s_out3>
5552 use <s_out0>
5553 use <s_out0>
5555 into:
5557 loop_exit:
5558 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5559 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5560 v_out2 = reduce <v_out1>
5561 s_out3 = extract_field <v_out2, 0>
5562 s_out4 = adjust_result <s_out3>
5563 use <s_out4>
5564 use <s_out4> */
5567 /* In an SLP reduction chain we reduce vector results into one vector if
5568 necessary, hence we set REDUC_GROUP_SIZE to 1 here. SCALAR_DEST is the
5569 LHS of the last stmt in the reduction chain, since we are looking for
5570 the loop exit phi node. */
5571 if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
5573 stmt_vec_info dest_stmt_info
5574 = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5575 /* Handle reduction patterns. */
5576 if (STMT_VINFO_RELATED_STMT (dest_stmt_info))
5577 dest_stmt_info = STMT_VINFO_RELATED_STMT (dest_stmt_info);
5579 scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
5580 group_size = 1;
5583 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5584 case REDUC_GROUP_SIZE is greater than the vectorization factor).
5585 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5586 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5587 correspond to the first vector stmt, etc.
5588 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
5589 if (group_size > new_phis.length ())
5591 ratio = group_size / new_phis.length ();
5592 gcc_assert (!(group_size % new_phis.length ()));
5594 else
5595 ratio = 1;
5597 for (k = 0; k < group_size; k++)
5599 if (k % ratio == 0)
5601 epilog_stmt = new_phis[k / ratio];
5602 reduction_phi_info = reduction_phis[k / ratio];
5603 if (double_reduc)
5604 inner_phi = inner_phis[k / ratio];
5607 if (slp_reduc)
5609 stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
5611 orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
5612 /* SLP statements can't participate in patterns. */
5613 gcc_assert (!orig_stmt_info);
5614 scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
5617 phis.create (3);
5618 /* Find the loop-closed-use at the loop exit of the original scalar
5619 result. (The reduction result is expected to have two immediate uses -
5620 one at the latch block, and one at the loop exit). */
5621 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5622 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
5623 && !is_gimple_debug (USE_STMT (use_p)))
5624 phis.safe_push (USE_STMT (use_p));
5626 /* While we expect to have found an exit_phi because of loop-closed-ssa
5627 form, we can end up without one if the scalar cycle is dead. */
5629 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5631 if (outer_loop)
5633 stmt_vec_info exit_phi_vinfo
5634 = loop_vinfo->lookup_stmt (exit_phi);
5635 gphi *vect_phi;
5637 /* FORNOW. Currently not supporting the case that an inner-loop
5638 reduction is not used in the outer-loop (but only outside the
5639 outer-loop), unless it is a double reduction. */
5640 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
5641 && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
5642 || double_reduc);
5644 if (double_reduc)
5645 STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
5646 else
5647 STMT_VINFO_VEC_STMT (exit_phi_vinfo)
5648 = vinfo_for_stmt (epilog_stmt);
5649 if (!double_reduc
5650 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
5651 != vect_double_reduction_def)
5652 continue;
5654 /* Handle double reduction:
5656 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5657 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5658 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5659 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5661 At that point the regular reduction (stmt2 and stmt3) is
5662 already vectorized, as well as the exit phi node, stmt4.
5663 Here we vectorize the phi node of double reduction, stmt1, and
5664 update all relevant statements. */
5666 /* Go through all the uses of s2 to find double reduction phi
5667 node, i.e., stmt1 above. */
5668 orig_name = PHI_RESULT (exit_phi);
5669 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5671 stmt_vec_info use_stmt_vinfo;
5672 tree vect_phi_init, preheader_arg, vect_phi_res;
5673 basic_block bb = gimple_bb (use_stmt);
5675 /* Check that USE_STMT is really a double reduction phi
5676 node. */
5677 if (gimple_code (use_stmt) != GIMPLE_PHI
5678 || gimple_phi_num_args (use_stmt) != 2
5679 || bb->loop_father != outer_loop)
5680 continue;
5681 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5682 if (!use_stmt_vinfo
5683 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5684 != vect_double_reduction_def)
5685 continue;
5687 /* Create vector phi node for double reduction:
5688 vs1 = phi <vs0, vs2>
5689 vs1 was created previously in this function by a call to
5690 vect_get_vec_def_for_operand and is stored in
5691 vec_initial_def;
5692 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5693 vs0 is created here. */
5695 /* Create vector phi node. */
5696 vect_phi = create_phi_node (vec_initial_def, bb);
5697 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5699 /* Create vs0 - initial def of the double reduction phi. */
5700 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5701 loop_preheader_edge (outer_loop));
5702 vect_phi_init = get_initial_def_for_reduction
5703 (stmt, preheader_arg, NULL);
5705 /* Update phi node arguments with vs0 and vs2. */
5706 add_phi_arg (vect_phi, vect_phi_init,
5707 loop_preheader_edge (outer_loop),
5708 UNKNOWN_LOCATION);
5709 add_phi_arg (vect_phi, PHI_RESULT (inner_phi->stmt),
5710 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5711 if (dump_enabled_p ())
5713 dump_printf_loc (MSG_NOTE, vect_location,
5714 "created double reduction phi node: ");
5715 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5718 vect_phi_res = PHI_RESULT (vect_phi);
5720 /* Replace the use, i.e., set the correct vs1 in the regular
5721 reduction phi node. FORNOW, NCOPIES is always 1, so the
5722 loop is redundant. */
5723 stmt_vec_info use_info = reduction_phi_info;
5724 for (j = 0; j < ncopies; j++)
5726 edge pr_edge = loop_preheader_edge (loop);
5727 SET_PHI_ARG_DEF (as_a <gphi *> (use_info->stmt),
5728 pr_edge->dest_idx, vect_phi_res);
5729 use_info = STMT_VINFO_RELATED_STMT (use_info);
5735 phis.release ();
5736 if (nested_in_vect_loop)
5738 if (double_reduc)
5739 loop = outer_loop;
5740 else
5741 continue;
5744 phis.create (3);
5745 /* Find the loop-closed-use at the loop exit of the original scalar
5746 result. (The reduction result is expected to have two immediate uses,
5747 one at the latch block, and one at the loop exit). For double
5748 reductions we are looking for exit phis of the outer loop. */
5749 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5751 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5753 if (!is_gimple_debug (USE_STMT (use_p)))
5754 phis.safe_push (USE_STMT (use_p));
5756 else
5758 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5760 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5762 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5764 if (!flow_bb_inside_loop_p (loop,
5765 gimple_bb (USE_STMT (phi_use_p)))
5766 && !is_gimple_debug (USE_STMT (phi_use_p)))
5767 phis.safe_push (USE_STMT (phi_use_p));
5773 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5775 /* Replace the uses: */
5776 orig_name = PHI_RESULT (exit_phi);
5777 scalar_result = scalar_results[k];
5778 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5779 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5780 SET_USE (use_p, scalar_result);
5783 phis.release ();
5787 /* Return a vector of type VECTYPE that is equal to the vector select
5788 operation "MASK ? VEC : IDENTITY". Insert the select statements
5789 before GSI. */
5791 static tree
5792 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5793 tree vec, tree identity)
5795 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5796 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5797 mask, vec, identity);
5798 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5799 return cond;
5802 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5803 order, starting with LHS. Insert the extraction statements before GSI and
5804 associate the new scalar SSA names with variable SCALAR_DEST.
5805 Return the SSA name for the result. */
5807 static tree
5808 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5809 tree_code code, tree lhs, tree vector_rhs)
5811 tree vectype = TREE_TYPE (vector_rhs);
5812 tree scalar_type = TREE_TYPE (vectype);
5813 tree bitsize = TYPE_SIZE (scalar_type);
5814 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5815 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
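/* For example, for a four-element VECTOR_RHS the loop below emits:
     s_0 = BIT_FIELD_REF <vector_rhs, esize, 0>;
     lhs_1 = lhs CODE s_0;
     s_1 = BIT_FIELD_REF <vector_rhs, esize, esize>;
     lhs_2 = lhs_1 CODE s_1;
     ...
   and returns the final accumulator.  */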
5817 for (unsigned HOST_WIDE_INT bit_offset = 0;
5818 bit_offset < vec_size_in_bits;
5819 bit_offset += element_bitsize)
5821 tree bitpos = bitsize_int (bit_offset);
5822 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5823 bitsize, bitpos);
5825 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5826 rhs = make_ssa_name (scalar_dest, stmt);
5827 gimple_assign_set_lhs (stmt, rhs);
5828 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5830 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5831 tree new_name = make_ssa_name (scalar_dest, stmt);
5832 gimple_assign_set_lhs (stmt, new_name);
5833 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5834 lhs = new_name;
5836 return lhs;
5839 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the
5840 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5841 statement. CODE is the operation performed by STMT and OPS are
5842 its scalar operands. REDUC_INDEX is the index of the operand in
5843 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5844 implements in-order reduction, or IFN_LAST if we should open-code it.
5845 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5846 that should be used to control the operation in a fully-masked loop. */
5848 static bool
5849 vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
5850 stmt_vec_info *vec_stmt, slp_tree slp_node,
5851 gimple *reduc_def_stmt,
5852 tree_code code, internal_fn reduc_fn,
5853 tree ops[3], tree vectype_in,
5854 int reduc_index, vec_loop_masks *masks)
5856 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5857 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5858 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5859 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5860 stmt_vec_info new_stmt_info = NULL;
5862 int ncopies;
5863 if (slp_node)
5864 ncopies = 1;
5865 else
5866 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5868 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5869 gcc_assert (ncopies == 1);
5870 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
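/* The loop-carried value is operand 0 for MINUS_EXPR (acc = acc - x)
   and operand 1 for the other supported codes (acc = x CODE acc).  */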
5871 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5872 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5873 == FOLD_LEFT_REDUCTION);
5875 if (slp_node)
5876 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5877 TYPE_VECTOR_SUBPARTS (vectype_in)));
5879 tree op0 = ops[1 - reduc_index];
5881 int group_size = 1;
5882 stmt_vec_info scalar_dest_def_info;
5883 auto_vec<tree> vec_oprnds0;
5884 if (slp_node)
5886 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
5887 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5888 scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5890 else
5892 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt);
5893 vec_oprnds0.create (1);
5894 vec_oprnds0.quick_push (loop_vec_def0);
5895 scalar_dest_def_info = stmt_info;
5898 tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
5899 tree scalar_type = TREE_TYPE (scalar_dest);
5900 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5902 int vec_num = vec_oprnds0.length ();
5903 gcc_assert (vec_num == 1 || slp_node);
5904 tree vec_elem_type = TREE_TYPE (vectype_out);
5905 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5907 tree vector_identity = NULL_TREE;
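/* In a fully-masked loop inactive lanes must not change the accumulated
   result, so they are merged with this identity value (zero) before each
   in-order step; see merge_with_identity.  */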
5908 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5909 vector_identity = build_zero_cst (vectype_out);
5911 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5912 int i;
5913 tree def0;
5914 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5916 gimple *new_stmt;
5917 tree mask = NULL_TREE;
5918 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5919 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5921 /* Handle MINUS by adding the negative. */
5922 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5924 tree negated = make_ssa_name (vectype_out);
5925 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5926 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5927 def0 = negated;
5930 if (mask)
5931 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5932 vector_identity);
5934 /* On the first iteration the input is simply the scalar phi
5935 result, and for subsequent iterations it is the output of
5936 the preceding operation. */
5937 if (reduc_fn != IFN_LAST)
5939 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5940 /* For chained SLP reductions the output of the previous reduction
5941 operation serves as the input of the next. For the final statement
5942 the output cannot be a temporary - we reuse the original
5943 scalar destination of the last statement. */
5944 if (i != vec_num - 1)
5946 gimple_set_lhs (new_stmt, scalar_dest_var);
5947 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5948 gimple_set_lhs (new_stmt, reduc_var);
5951 else
5953 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5954 reduc_var, def0);
5955 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5956 /* Remove the statement, so that we can use the same code paths
5957 as for statements that we've just created. */
5958 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5959 gsi_remove (&tmp_gsi, false);
5962 if (i == vec_num - 1)
5964 gimple_set_lhs (new_stmt, scalar_dest);
5965 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
5966 new_stmt);
5968 else
5969 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
5970 new_stmt, gsi);
5972 if (slp_node)
5973 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5976 if (!slp_node)
5977 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5979 return true;
5982 /* Function is_nonwrapping_integer_induction.
5984 Check that STMT (which is part of loop LOOP) describes an integer
5985 induction and that it cannot cause overflow. */
5987 static bool
5988 is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
5990 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
5991 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5992 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5993 tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
5994 widest_int ni, max_loop_value, lhs_max;
5995 wi::overflow_type overflow = wi::OVF_NONE;
5997 /* Make sure the loop is integer based. */
5998 if (TREE_CODE (base) != INTEGER_CST
5999 || TREE_CODE (step) != INTEGER_CST)
6000 return false;
6002 /* Check that the max size of the loop will not wrap. */
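/* If overflow is undefined for the type we may assume the induction
   never wraps.  */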
6004 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
6005 return true;
6007 if (! max_stmt_executions (loop, &ni))
6008 return false;
6010 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
6011 &overflow);
6012 if (overflow)
6013 return false;
6015 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
6016 TYPE_SIGN (lhs_type), &overflow);
6017 if (overflow)
6018 return false;
6020 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6021 <= TYPE_PRECISION (lhs_type));
6024 /* Function vectorizable_reduction.
6026 Check if STMT performs a reduction operation that can be vectorized.
6027 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6028 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6029 Return FALSE if not a vectorizable STMT, TRUE otherwise.
6031 This function also handles reduction idioms (patterns) that have been
6032 recognized in advance during vect_pattern_recog. In this case, STMT may be
6033 of this form:
6034 X = pattern_expr (arg0, arg1, ..., X)
6035 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6036 sequence that had been detected and replaced by the pattern-stmt (STMT).
6038 This function also handles reduction of condition expressions, for example:
6039 for (int i = 0; i < N; i++)
6040 if (a[i] < value)
6041 last = a[i];
6042 This is handled by vectorising the loop and creating an additional vector
6043 containing the loop indexes for which "a[i] < value" was true. In the
6044 function epilogue this is reduced to a single max value and then used to
6045 index into the vector of results.
6047 In some cases of reduction patterns, the type of the reduction variable X is
6048 different than the type of the other arguments of STMT.
6049 In such cases, the vectype that is used when transforming STMT into a vector
6050 stmt is different than the vectype that is used to determine the
6051 vectorization factor, because it consists of a different number of elements
6052 than the actual number of elements that are being operated upon in parallel.
6054 For example, consider an accumulation of shorts into an int accumulator.
6055 On some targets it's possible to vectorize this pattern operating on 8
6056 shorts at a time (hence, the vectype for purposes of determining the
6057 vectorization factor should be V8HI); on the other hand, the vectype that
6058 is used to create the vector form is actually V4SI (the type of the result).
6060 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6061 indicates the actual level of parallelism (V8HI in the example), so
6062 that the right vectorization factor would be derived. This vectype
6063 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6064 be used to create the vectorized stmt. The right vectype for the vectorized
6065 stmt is obtained from the type of the result X:
6066 get_vectype_for_scalar_type (TREE_TYPE (X))
6068 This means that, contrary to "regular" reductions (or "regular" stmts in
6069 general), the following equation:
6070 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6071 does *NOT* necessarily hold for reduction patterns. */
6073 bool
6074 vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
6075 stmt_vec_info *vec_stmt, slp_tree slp_node,
6076 slp_instance slp_node_instance,
6077 stmt_vector_for_cost *cost_vec)
6079 tree vec_dest;
6080 tree scalar_dest;
6081 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6082 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6083 tree vectype_in = NULL_TREE;
6084 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6085 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6086 enum tree_code code, orig_code;
6087 internal_fn reduc_fn;
6088 machine_mode vec_mode;
6089 int op_type;
6090 optab optab;
6091 tree new_temp = NULL_TREE;
6092 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6093 gimple *cond_reduc_def_stmt = NULL;
6094 enum tree_code cond_reduc_op_code = ERROR_MARK;
6095 tree scalar_type;
6096 bool is_simple_use;
6097 int i;
6098 int ncopies;
6099 int epilog_copies;
6100 stmt_vec_info prev_stmt_info, prev_phi_info;
6101 bool single_defuse_cycle = false;
6102 stmt_vec_info new_stmt_info = NULL;
6103 int j;
6104 tree ops[3];
6105 enum vect_def_type dts[3];
6106 bool nested_cycle = false, found_nested_cycle_def = false;
6107 bool double_reduc = false;
6108 basic_block def_bb;
6109 struct loop * def_stmt_loop;
6110 tree def_arg;
6111 auto_vec<tree> vec_oprnds0;
6112 auto_vec<tree> vec_oprnds1;
6113 auto_vec<tree> vec_oprnds2;
6114 auto_vec<tree> vect_defs;
6115 auto_vec<stmt_vec_info> phis;
6116 int vec_num;
6117 tree def0, tem;
6118 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6119 tree cond_reduc_val = NULL_TREE;
6121 /* Make sure it was already recognized as a reduction computation. */
6122 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def
6123 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle)
6124 return false;
6126 if (nested_in_vect_loop_p (loop, stmt))
6128 loop = loop->inner;
6129 nested_cycle = true;
6132 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6133 gcc_assert (slp_node && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt);
6135 if (gimple_code (stmt) == GIMPLE_PHI)
6137 tree phi_result = gimple_phi_result (stmt);
6138 /* Analysis is fully done on the reduction stmt invocation. */
6139 if (! vec_stmt)
6141 if (slp_node)
6142 slp_node_instance->reduc_phis = slp_node;
6144 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
6145 return true;
6148 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6149 /* Leave the scalar phi in place. Note that checking
6150 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6151 for reductions involving a single statement. */
6152 return true;
6154 stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
6155 if (STMT_VINFO_IN_PATTERN_P (reduc_stmt_info))
6156 reduc_stmt_info = STMT_VINFO_RELATED_STMT (reduc_stmt_info);
6158 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info)
6159 == EXTRACT_LAST_REDUCTION)
6160 /* Leave the scalar phi in place. */
6161 return true;
6163 gassign *reduc_stmt = as_a <gassign *> (reduc_stmt_info->stmt);
6164 for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
6166 tree op = gimple_op (reduc_stmt, k);
6167 if (op == gimple_phi_result (stmt))
6168 continue;
6169 if (k == 1
6170 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
6171 continue;
6172 if (!vectype_in
6173 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6174 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
6175 vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
6176 break;
6178 gcc_assert (vectype_in);
6180 if (slp_node)
6181 ncopies = 1;
6182 else
6183 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6185 stmt_vec_info use_stmt_info;
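/* Mirrors the single def-use cycle check on the reduction stmt below:
   with more than one copy this is only possible when the PHI result is
   used solely by the reduction stmt (possibly via its pattern stmt).  */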
6186 if (ncopies > 1
6187 && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
6188 && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
6189 && (use_stmt_info == reduc_stmt_info
6190 || STMT_VINFO_RELATED_STMT (use_stmt_info) == reduc_stmt))
6191 single_defuse_cycle = true;
6193 /* Create the destination vector */
6194 scalar_dest = gimple_assign_lhs (reduc_stmt);
6195 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
6197 if (slp_node)
6198 /* The size vect_schedule_slp_instance computes is off for us. */
6199 vec_num = vect_get_num_vectors
6200 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6201 * SLP_TREE_SCALAR_STMTS (slp_node).length (),
6202 vectype_in);
6203 else
6204 vec_num = 1;
6206 /* Generate the reduction PHIs upfront. */
6207 prev_phi_info = NULL;
6208 for (j = 0; j < ncopies; j++)
6210 if (j == 0 || !single_defuse_cycle)
6212 for (i = 0; i < vec_num; i++)
6214 /* Create the reduction-phi that defines the reduction
6215 operand. */
6216 gimple *new_phi = create_phi_node (vec_dest, loop->header);
6217 stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
6219 if (slp_node)
6220 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
6221 else
6223 if (j == 0)
6224 STMT_VINFO_VEC_STMT (stmt_info)
6225 = *vec_stmt = new_phi_info;
6226 else
6227 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
6228 prev_phi_info = new_phi_info;
6234 return true;
6237 /* 1. Is vectorizable reduction? */
6238 /* Not supportable if the reduction variable is used in the loop, unless
6239 it's a reduction chain. */
6240 if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
6241 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6242 return false;
6244 /* Reductions that are not used even in an enclosing outer-loop
6245 are expected to be "live" (used out of the loop). */
6246 if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
6247 && !STMT_VINFO_LIVE_P (stmt_info))
6248 return false;
6250 /* 2. Has this been recognized as a reduction pattern?
6252 Check if STMT represents a pattern that has been recognized
6253 in earlier analysis stages. For stmts that represent a pattern,
6254 the STMT_VINFO_RELATED_STMT field records the last stmt in
6255 the original sequence that constitutes the pattern. */
6257 stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
6258 if (orig_stmt_info)
6260 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
6261 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
6264 /* 3. Check the operands of the operation. The first operands are defined
6265 inside the loop body. The last operand is the reduction variable,
6266 which is defined by the loop-header-phi. */
6268 gcc_assert (is_gimple_assign (stmt));
6270 /* Flatten RHS. */
6271 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6273 case GIMPLE_BINARY_RHS:
6274 code = gimple_assign_rhs_code (stmt);
6275 op_type = TREE_CODE_LENGTH (code);
6276 gcc_assert (op_type == binary_op);
6277 ops[0] = gimple_assign_rhs1 (stmt);
6278 ops[1] = gimple_assign_rhs2 (stmt);
6279 break;
6281 case GIMPLE_TERNARY_RHS:
6282 code = gimple_assign_rhs_code (stmt);
6283 op_type = TREE_CODE_LENGTH (code);
6284 gcc_assert (op_type == ternary_op);
6285 ops[0] = gimple_assign_rhs1 (stmt);
6286 ops[1] = gimple_assign_rhs2 (stmt);
6287 ops[2] = gimple_assign_rhs3 (stmt);
6288 break;
6290 case GIMPLE_UNARY_RHS:
6291 return false;
6293 default:
6294 gcc_unreachable ();
6297 if (code == COND_EXPR && slp_node)
6298 return false;
6300 scalar_dest = gimple_assign_lhs (stmt);
6301 scalar_type = TREE_TYPE (scalar_dest);
6302 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6303 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6304 return false;
6306 /* Do not try to vectorize bit-precision reductions. */
6307 if (!type_has_mode_precision_p (scalar_type))
6308 return false;
6310 /* All uses but the last are expected to be defined in the loop.
6311 The last use is the reduction variable. In case of a nested cycle this
6312 assumption is not true: we use reduc_index to record the index of the
6313 reduction variable. */
6314 stmt_vec_info reduc_def_info = NULL;
6315 int reduc_index = -1;
6316 for (i = 0; i < op_type; i++)
6318 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6319 if (i == 0 && code == COND_EXPR)
6320 continue;
6322 stmt_vec_info def_stmt_info;
6323 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6324 &def_stmt_info);
6325 dt = dts[i];
6326 gcc_assert (is_simple_use);
6327 if (dt == vect_reduction_def)
6329 reduc_def_info = def_stmt_info;
6330 reduc_index = i;
6331 continue;
6333 else if (tem)
6335 /* To properly compute ncopies we are interested in the widest
6336 input type in case we're looking at a widening accumulation. */
6337 if (!vectype_in
6338 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6339 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6340 vectype_in = tem;
6343 if (dt != vect_internal_def
6344 && dt != vect_external_def
6345 && dt != vect_constant_def
6346 && dt != vect_induction_def
6347 && !(dt == vect_nested_cycle && nested_cycle))
6348 return false;
6350 if (dt == vect_nested_cycle)
6352 found_nested_cycle_def = true;
6353 reduc_def_info = def_stmt_info;
6354 reduc_index = i;
6357 if (i == 1 && code == COND_EXPR)
6359 /* Record how the value of COND_EXPR is defined. */
6360 if (dt == vect_constant_def)
6362 cond_reduc_dt = dt;
6363 cond_reduc_val = ops[i];
6365 if (dt == vect_induction_def
6366 && def_stmt_info
6367 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6369 cond_reduc_dt = dt;
6370 cond_reduc_def_stmt = def_stmt_info;
6375 if (!vectype_in)
6376 vectype_in = vectype_out;
6378 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6379 directly used in stmt. */
6380 if (reduc_index == -1)
6382 if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
6384 if (dump_enabled_p ())
6385 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6386 "in-order reduction chain without SLP.\n");
6387 return false;
6390 if (orig_stmt_info)
6391 reduc_def_info = STMT_VINFO_REDUC_DEF (orig_stmt_info);
6392 else
6393 reduc_def_info = STMT_VINFO_REDUC_DEF (stmt_info);
6396 if (! reduc_def_info)
6397 return false;
6399 gphi *reduc_def_phi = dyn_cast <gphi *> (reduc_def_info->stmt);
6400 if (!reduc_def_phi)
6401 return false;
6403 if (!(reduc_index == -1
6404 || dts[reduc_index] == vect_reduction_def
6405 || dts[reduc_index] == vect_nested_cycle
6406 || ((dts[reduc_index] == vect_internal_def
6407 || dts[reduc_index] == vect_external_def
6408 || dts[reduc_index] == vect_constant_def
6409 || dts[reduc_index] == vect_induction_def)
6410 && nested_cycle && found_nested_cycle_def)))
6412 /* For pattern-recognized stmts, orig_stmt might be a reduction,
6413 but some helper statements for the pattern might not be, or
6414 might be COND_EXPRs with reduction uses in the condition. */
6415 gcc_assert (orig_stmt_info);
6416 return false;
6419 /* PHIs should not participate in patterns. */
6420 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info));
6421 enum vect_reduction_type v_reduc_type
6422 = STMT_VINFO_REDUC_TYPE (reduc_def_info);
6423 stmt_vec_info tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
6425 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
6426 /* If we have a condition reduction, see if we can simplify it further. */
6427 if (v_reduc_type == COND_REDUCTION)
6429 /* TODO: We can't yet handle reduction chains, since we need to treat
6430 each COND_EXPR in the chain specially, not just the last one.
6431 E.g. for:
6433 x_1 = PHI <x_3, ...>
6434 x_2 = a_2 ? ... : x_1;
6435 x_3 = a_3 ? ... : x_2;
6437 we're interested in the last element in x_3 for which a_2 || a_3
6438 is true, whereas the current reduction chain handling would
6439 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6440 as a reduction operation. */
6441 if (reduc_index == -1)
6443 if (dump_enabled_p ())
6444 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6445 "conditional reduction chains not supported\n");
6446 return false;
6449 /* vect_is_simple_reduction ensured that operand 2 is the
6450 loop-carried operand. */
6451 gcc_assert (reduc_index == 2);
6453 /* Loop peeling modifies the initial value of the reduction PHI, which
6454 makes the reduction stmt to be transformed different from the
6455 original stmt analyzed. We need to record the reduction code for
6456 a CONST_COND_REDUCTION type reduction at the analysis stage, so that
6457 it can be used directly at the transform stage. */
6458 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
6459 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
6461 /* Also set the reduction type to CONST_COND_REDUCTION. */
6462 gcc_assert (cond_reduc_dt == vect_constant_def);
6463 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
6465 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
6466 vectype_in, OPTIMIZE_FOR_SPEED))
6468 if (dump_enabled_p ())
6469 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6470 "optimizing condition reduction with"
6471 " FOLD_EXTRACT_LAST.\n");
6472 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
6474 else if (cond_reduc_dt == vect_induction_def)
6476 stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt);
6477 tree base
6478 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
6479 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
6481 gcc_assert (TREE_CODE (base) == INTEGER_CST
6482 && TREE_CODE (step) == INTEGER_CST);
6483 cond_reduc_val = NULL_TREE;
6484 /* Find a suitable value: below BASE for MAX_EXPR and above BASE
6485 for MIN_EXPR; for now punt if BASE is the minimum value of the
6486 type for MAX_EXPR or the maximum value of the type for MIN_EXPR. */
6487 if (tree_int_cst_sgn (step) == -1)
6489 cond_reduc_op_code = MIN_EXPR;
6490 if (tree_int_cst_sgn (base) == -1)
6491 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6492 else if (tree_int_cst_lt (base,
6493 TYPE_MAX_VALUE (TREE_TYPE (base))))
6494 cond_reduc_val
6495 = int_const_binop (PLUS_EXPR, base, integer_one_node);
6497 else
6499 cond_reduc_op_code = MAX_EXPR;
6500 if (tree_int_cst_sgn (base) == 1)
6501 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
6502 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
6503 base))
6504 cond_reduc_val
6505 = int_const_binop (MINUS_EXPR, base, integer_one_node);
6507 if (cond_reduc_val)
6509 if (dump_enabled_p ())
6510 dump_printf_loc (MSG_NOTE, vect_location,
6511 "condition expression based on "
6512 "integer induction.\n");
6513 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6514 = INTEGER_INDUC_COND_REDUCTION;
6517 else if (cond_reduc_dt == vect_constant_def)
6519 enum vect_def_type cond_initial_dt;
6520 gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
6521 tree cond_initial_val
6522 = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
6524 gcc_assert (cond_reduc_val != NULL_TREE);
6525 vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
6526 if (cond_initial_dt == vect_constant_def
6527 && types_compatible_p (TREE_TYPE (cond_initial_val),
6528 TREE_TYPE (cond_reduc_val)))
6530 tree e = fold_binary (LE_EXPR, boolean_type_node,
6531 cond_initial_val, cond_reduc_val);
6532 if (e && (integer_onep (e) || integer_zerop (e)))
6534 if (dump_enabled_p ())
6535 dump_printf_loc (MSG_NOTE, vect_location,
6536 "condition expression based on "
6537 "compile time constant.\n");
6538 /* Record reduction code at analysis stage. */
6539 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
6540 = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
6541 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
6542 = CONST_COND_REDUCTION;
6548 if (orig_stmt_info)
6549 gcc_assert (tmp == orig_stmt_info
6550 || REDUC_GROUP_FIRST_ELEMENT (tmp) == orig_stmt_info);
6551 else
6552 /* We changed STMT to be the first stmt in the reduction chain, hence we
6553 check that in this case the first element in the chain is STMT. */
6554 gcc_assert (tmp == stmt_info
6555 || REDUC_GROUP_FIRST_ELEMENT (tmp) == stmt_info);
6557 if (STMT_VINFO_LIVE_P (reduc_def_info))
6558 return false;
6560 if (slp_node)
6561 ncopies = 1;
6562 else
6563 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
6565 gcc_assert (ncopies >= 1);
6567 vec_mode = TYPE_MODE (vectype_in);
6568 poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
6570 if (code == COND_EXPR)
6572 /* Only call during the analysis stage, otherwise we'll lose
6573 STMT_VINFO_TYPE. */
6574 if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
6575 ops[reduc_index], 0, NULL,
6576 cost_vec))
6578 if (dump_enabled_p ())
6579 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6580 "unsupported condition in reduction\n");
6581 return false;
6584 else
6586 /* 4. Supportable by target? */
6588 if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
6589 || code == LROTATE_EXPR || code == RROTATE_EXPR)
6591 /* Shifts and rotates are only supported by vectorizable_shifts,
6592 not vectorizable_reduction. */
6593 if (dump_enabled_p ())
6594 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6595 "unsupported shift or rotation.\n");
6596 return false;
6599 /* 4.1. check support for the operation in the loop */
6600 optab = optab_for_tree_code (code, vectype_in, optab_default);
6601 if (!optab)
6603 if (dump_enabled_p ())
6604 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6605 "no optab.\n");
6607 return false;
6610 if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
6612 if (dump_enabled_p ())
6613 dump_printf (MSG_NOTE, "op not supported by target.\n");
6615 if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
6616 || !vect_worthwhile_without_simd_p (loop_vinfo, code))
6617 return false;
6619 if (dump_enabled_p ())
6620 dump_printf (MSG_NOTE, "proceeding using word mode.\n");
6623 /* Worthwhile without SIMD support? */
6624 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
6625 && !vect_worthwhile_without_simd_p (loop_vinfo, code))
6627 if (dump_enabled_p ())
6628 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6629 "not worthwhile without SIMD support.\n");
6631 return false;
6635 /* 4.2. Check support for the epilog operation.
6637 If STMT represents a reduction pattern, then the type of the
6638 reduction variable may be different than the type of the rest
6639 of the arguments. For example, consider the case of accumulation
6640 of shorts into an int accumulator; The original code:
6641 S1: int_a = (int) short_a;
6642 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6644 was replaced with:
6645 STMT: int_acc = widen_sum <short_a, int_acc>
6647 This means that:
6648 1. The tree-code that is used to create the vector operation in the
6649 epilog code (that reduces the partial results) is not the
6650 tree-code of STMT, but is rather the tree-code of the original
6651 stmt from the pattern that STMT is replacing. I.e, in the example
6652 above we want to use 'widen_sum' in the loop, but 'plus' in the
6653 epilog.
6654 2. The type (mode) we use to check available target support
6655 for the vector operation to be created in the *epilog*, is
6656 determined by the type of the reduction variable (in the example
6657 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
6658 However the type (mode) we use to check available target support
6659 for the vector operation to be created *inside the loop*, is
6660 determined by the type of the other arguments to STMT (in the
6661 example we'd check this: optab_handler (widen_sum_optab,
6662 vect_short_mode)).
6664 This is contrary to "regular" reductions, in which the types of all
6665 the arguments are the same as the type of the reduction variable.
6666 For "regular" reductions we can therefore use the same vector type
6667 (and also the same tree-code) when generating the epilog code and
6668 when generating the code inside the loop. */
6670 vect_reduction_type reduction_type
6671 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
6672 if (orig_stmt_info
6673 && (reduction_type == TREE_CODE_REDUCTION
6674 || reduction_type == FOLD_LEFT_REDUCTION))
6676 /* This is a reduction pattern: get the vectype from the type of the
6677 reduction variable, and get the tree-code from orig_stmt. */
6678 orig_code = gimple_assign_rhs_code (orig_stmt_info->stmt);
6679 gcc_assert (vectype_out);
6680 vec_mode = TYPE_MODE (vectype_out);
6682 else
6684 /* Regular reduction: the same vectype and tree-code as used for
6685 the vector code inside the loop can also be used for the epilog code. */
6686 orig_code = code;
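/* The lanes of a MINUS reduction accumulate negated values, so the
   partial results are combined with PLUS in the epilogue.  */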
6688 if (code == MINUS_EXPR)
6689 orig_code = PLUS_EXPR;
6691 /* For simple condition reductions, replace with the actual expression
6692 we want to base our reduction around. */
6693 if (reduction_type == CONST_COND_REDUCTION)
6695 orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
6696 gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
6698 else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
6699 orig_code = cond_reduc_op_code;
6702 if (nested_cycle)
6704 def_bb = gimple_bb (reduc_def_phi);
6705 def_stmt_loop = def_bb->loop_father;
6706 def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
6707 loop_preheader_edge (def_stmt_loop));
6708 stmt_vec_info def_arg_stmt_info = loop_vinfo->lookup_def (def_arg);
6709 if (def_arg_stmt_info
6710 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info)
6711 == vect_double_reduction_def))
6712 double_reduc = true;
6715 reduc_fn = IFN_LAST;
6717 if (reduction_type == TREE_CODE_REDUCTION
6718 || reduction_type == FOLD_LEFT_REDUCTION
6719 || reduction_type == INTEGER_INDUC_COND_REDUCTION
6720 || reduction_type == CONST_COND_REDUCTION)
6722 if (reduction_type == FOLD_LEFT_REDUCTION
6723 ? fold_left_reduction_fn (orig_code, &reduc_fn)
6724 : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
6726 if (reduc_fn != IFN_LAST
6727 && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
6728 OPTIMIZE_FOR_SPEED))
6730 if (dump_enabled_p ())
6731 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6732 "reduc op not supported by target.\n");
6734 reduc_fn = IFN_LAST;
6737 else
6739 if (!nested_cycle || double_reduc)
6741 if (dump_enabled_p ())
6742 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6743 "no reduc code for scalar code.\n");
6745 return false;
6749 else if (reduction_type == COND_REDUCTION)
6751 int scalar_precision
6752 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
6753 cr_index_scalar_type = make_unsigned_type (scalar_precision);
6754 cr_index_vector_type = build_vector_type (cr_index_scalar_type,
6755 nunits_out);
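/* COND_REDUCTION keeps an unsigned index vector with one lane per data
   lane, recording the iteration at which the condition last held; the
   epilogue reduces it with IFN_REDUC_MAX and uses the result to select
   the answer (see the function comment above).  */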
6757 if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
6758 OPTIMIZE_FOR_SPEED))
6759 reduc_fn = IFN_REDUC_MAX;
6762 if (reduction_type != EXTRACT_LAST_REDUCTION
6763 && reduc_fn == IFN_LAST
6764 && !nunits_out.is_constant ())
6766 if (dump_enabled_p ())
6767 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6768 "missing target support for reduction on"
6769 " variable-length vectors.\n");
6770 return false;
6773 if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
6774 && ncopies > 1)
6776 if (dump_enabled_p ())
6777 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6778 "multiple types in double reduction or condition "
6779 "reduction.\n");
6780 return false;
6783 /* For SLP reductions, see if there is a neutral value we can use. */
6784 tree neutral_op = NULL_TREE;
6785 if (slp_node)
6786 neutral_op = neutral_op_for_slp_reduction
6787 (slp_node_instance->reduc_phis, code,
6788 REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
6790 if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
6792 /* We can't support in-order reductions of code such as this:
6794 for (int i = 0; i < n1; ++i)
6795 for (int j = 0; j < n2; ++j)
6796 l += a[j];
6798 since GCC effectively transforms the loop when vectorizing:
6800 for (int i = 0; i < n1 / VF; ++i)
6801 for (int j = 0; j < n2; ++j)
6802 for (int k = 0; k < VF; ++k)
6803 l += a[j];
6805 which is a reassociation of the original operation. */
6806 if (dump_enabled_p ())
6807 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6808 "in-order double reduction not supported.\n");
6810 return false;
6813 if (reduction_type == FOLD_LEFT_REDUCTION
6814 && slp_node
6815 && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
6817 /* We cannot use in-order reductions in this case because there is
6818 an implicit reassociation of the operations involved. */
6819 if (dump_enabled_p ())
6820 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6821 "in-order unchained SLP reductions not supported.\n");
6822 return false;
6825 /* For double reductions, and for SLP reductions with a neutral value,
6826 we construct a variable-length initial vector by loading a vector
6827 full of the neutral value and then shift-and-inserting the start
6828 values into the low-numbered elements. */
6829 if ((double_reduc || neutral_op)
6830 && !nunits_out.is_constant ()
6831 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
6832 vectype_out, OPTIMIZE_FOR_SPEED))
6834 if (dump_enabled_p ())
6835 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6836 "reduction on variable-length vectors requires"
6837 " target support for a vector-shift-and-insert"
6838 " operation.\n");
6839 return false;
6842 /* Check extra constraints for variable-length unchained SLP reductions. */
6843 if (STMT_SLP_TYPE (stmt_info)
6844 && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
6845 && !nunits_out.is_constant ())
6847 /* We checked above that we could build the initial vector when
6848 there's a neutral element value. Check here for the case in
6849 which each SLP statement has its own initial value and in which
6850 that value needs to be repeated for every instance of the
6851 statement within the initial vector. */
6852 unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
6853 scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
6854 if (!neutral_op
6855 && !can_duplicate_and_interleave_p (group_size, elt_mode))
6857 if (dump_enabled_p ())
6858 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6859 "unsupported form of SLP reduction for"
6860 " variable-length vectors: cannot build"
6861 " initial vector.\n");
6862 return false;
6864 /* The epilogue code relies on the number of elements being a multiple
6865 of the group size. The duplicate-and-interleave approach to setting
6866 up the initial vector does too. */
6867 if (!multiple_p (nunits_out, group_size))
6869 if (dump_enabled_p ())
6870 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6871 "unsupported form of SLP reduction for"
6872 " variable-length vectors: the vector size"
6873 " is not a multiple of the number of results.\n");
6874 return false;
6878 /* In case of a widening multiplication by a constant, we update the type
6879 of the constant to be the type of the other operand. We check that the
6880 constant fits the type in the pattern recognition pass. */
6881 if (code == DOT_PROD_EXPR
6882 && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
6884 if (TREE_CODE (ops[0]) == INTEGER_CST)
6885 ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
6886 else if (TREE_CODE (ops[1]) == INTEGER_CST)
6887 ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
6888 else
6890 if (dump_enabled_p ())
6891 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6892 "invalid types in dot-prod\n");
6894 return false;
6898 if (reduction_type == COND_REDUCTION)
6900 widest_int ni;
6902 if (! max_loop_iterations (loop, &ni))
6904 if (dump_enabled_p ())
6905 dump_printf_loc (MSG_NOTE, vect_location,
6906 "loop count not known, cannot create cond "
6907 "reduction.\n");
6908 return false;
6910 /* Convert backedges to iterations. */
6911 ni += 1;
6913 /* The additional index will have the same type as the condition. Check
6914 that the loop iteration count fits into this type less one (because
6915 we'll use up the zero slot for when there are no matches). */
6916 tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
6917 if (wi::geu_p (ni, wi::to_widest (max_index)))
6919 if (dump_enabled_p ())
6920 dump_printf_loc (MSG_NOTE, vect_location,
6921 "loop size is greater than data size.\n");
6922 return false;
6926 /* In case the vectorization factor (VF) is bigger than the number
6927 of elements that we can fit in a vectype (nunits), we have to generate
6928 more than one vector stmt, i.e., we need to "unroll" the
6929 vector stmt by a factor VF/nunits. For more details see documentation
6930 in vectorizable_operation. */
6932 /* If the reduction is used in an outer loop we need to generate
6933 VF intermediate results, like so (e.g. for ncopies=2):
6934 r0 = phi (init, r0)
6935 r1 = phi (init, r1)
6936 r0 = x0 + r0;
6937 r1 = x1 + r1;
6938 (i.e. we generate VF results in 2 registers).
6939 In this case we have a separate def-use cycle for each copy, and therefore
6940 for each copy we get the vector def for the reduction variable from the
6941 respective phi node created for this copy.
6943 Otherwise (the reduction is unused in the loop nest), we can combine
6944 together intermediate results, like so (e.g. for ncopies=2):
6945 r = phi (init, r)
6946 r = x0 + r;
6947 r = x1 + r;
6948 (i.e. we generate VF/2 results in a single register).
6949 In this case for each copy we get the vector def for the reduction variable
6950 from the vectorized reduction operation generated in the previous iteration.
6952 This only works when we see both the reduction PHI and its only consumer
6953 in vectorizable_reduction and there are no intermediate stmts
6954 participating. */
6955 stmt_vec_info use_stmt_info;
6956 tree reduc_phi_result = gimple_phi_result (reduc_def_phi);
6957 if (ncopies > 1
6958 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
6959 && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
6960 && (use_stmt_info == stmt_info
6961 || STMT_VINFO_RELATED_STMT (use_stmt_info) == stmt))
6963 single_defuse_cycle = true;
6964 epilog_copies = 1;
6966 else
6967 epilog_copies = ncopies;
6969 /* If the reduction stmt is one of the patterns that have lane
6970 reduction embedded, we cannot handle the case of ! single_defuse_cycle. */
6971 if ((ncopies > 1
6972 && ! single_defuse_cycle)
6973 && (code == DOT_PROD_EXPR
6974 || code == WIDEN_SUM_EXPR
6975 || code == SAD_EXPR))
6977 if (dump_enabled_p ())
6978 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6979 "multi def-use cycle not possible for lane-reducing "
6980 "reduction operation\n");
6981 return false;
6984 if (slp_node)
6985 vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
6986 else
6987 vec_num = 1;
6989 internal_fn cond_fn = get_conditional_internal_fn (code);
6990 vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
6992 if (!vec_stmt) /* transformation not required. */
6994 vect_model_reduction_cost (stmt_info, reduc_fn, ncopies, cost_vec);
6995 if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
6997 if (reduction_type != FOLD_LEFT_REDUCTION
6998 && (cond_fn == IFN_LAST
6999 || !direct_internal_fn_supported_p (cond_fn, vectype_in,
7000 OPTIMIZE_FOR_SPEED)))
7002 if (dump_enabled_p ())
7003 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7004 "can't use a fully-masked loop because no"
7005 " conditional operation is available.\n");
7006 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7008 else if (reduc_index == -1)
7010 if (dump_enabled_p ())
7011 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7012 "can't use a fully-masked loop for chained"
7013 " reductions.\n");
7014 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7016 else
7017 vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
7018 vectype_in);
7020 if (dump_enabled_p ()
7021 && reduction_type == FOLD_LEFT_REDUCTION)
7022 dump_printf_loc (MSG_NOTE, vect_location,
7023 "using an in-order (fold-left) reduction.\n");
7024 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
7025 return true;
7028 /* Transform. */
7030 if (dump_enabled_p ())
7031 dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
7033 /* FORNOW: Multiple types are not supported for condition. */
7034 if (code == COND_EXPR)
7035 gcc_assert (ncopies == 1);
7037 bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
7039 if (reduction_type == FOLD_LEFT_REDUCTION)
7040 return vectorize_fold_left_reduction
7041 (stmt, gsi, vec_stmt, slp_node, reduc_def_phi, code,
7042 reduc_fn, ops, vectype_in, reduc_index, masks);
7044 if (reduction_type == EXTRACT_LAST_REDUCTION)
7046 gcc_assert (!slp_node);
7047 return vectorizable_condition (stmt, gsi, vec_stmt,
7048 NULL, reduc_index, NULL, NULL);
7051 /* Create the destination vector */
7052 vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
7054 prev_stmt_info = NULL;
7055 prev_phi_info = NULL;
7056 if (!slp_node)
7058 vec_oprnds0.create (1);
7059 vec_oprnds1.create (1);
7060 if (op_type == ternary_op)
7061 vec_oprnds2.create (1);
7064 phis.create (vec_num);
7065 vect_defs.create (vec_num);
7066 if (!slp_node)
7067 vect_defs.quick_push (NULL_TREE);
7069 if (slp_node)
7070 phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
7071 else
7072 phis.quick_push (STMT_VINFO_VEC_STMT (reduc_def_info));
7074 for (j = 0; j < ncopies; j++)
7076 if (code == COND_EXPR)
7078 gcc_assert (!slp_node);
7079 vectorizable_condition (stmt, gsi, vec_stmt,
7080 PHI_RESULT (phis[0]->stmt),
7081 reduc_index, NULL, NULL);
7082 /* Multiple types are not supported for condition. */
7083 break;
7086 /* Handle uses. */
7087 if (j == 0)
7089 if (slp_node)
7091 /* Get vec defs for all the operands except the reduction index,
7092 ensuring the ordering of the ops in the vector is kept. */
7093 auto_vec<tree, 3> slp_ops;
7094 auto_vec<vec<tree>, 3> vec_defs;
7096 slp_ops.quick_push (ops[0]);
7097 slp_ops.quick_push (ops[1]);
7098 if (op_type == ternary_op)
7099 slp_ops.quick_push (ops[2]);
7101 vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
7103 vec_oprnds0.safe_splice (vec_defs[0]);
7104 vec_defs[0].release ();
7105 vec_oprnds1.safe_splice (vec_defs[1]);
7106 vec_defs[1].release ();
7107 if (op_type == ternary_op)
7109 vec_oprnds2.safe_splice (vec_defs[2]);
7110 vec_defs[2].release ();
7113 else
7115 vec_oprnds0.quick_push
7116 (vect_get_vec_def_for_operand (ops[0], stmt));
7117 vec_oprnds1.quick_push
7118 (vect_get_vec_def_for_operand (ops[1], stmt));
7119 if (op_type == ternary_op)
7120 vec_oprnds2.quick_push
7121 (vect_get_vec_def_for_operand (ops[2], stmt));
7124 else
7126 if (!slp_node)
7128 gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
7130 if (single_defuse_cycle && reduc_index == 0)
7131 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
7132 else
7133 vec_oprnds0[0]
7134 = vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]);
7135 if (single_defuse_cycle && reduc_index == 1)
7136 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
7137 else
7138 vec_oprnds1[0]
7139 = vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]);
7140 if (op_type == ternary_op)
7142 if (single_defuse_cycle && reduc_index == 2)
7143 vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
7144 else
7145 vec_oprnds2[0]
7146 = vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]);
7151 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
7153 tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
7154 if (masked_loop_p)
7156 /* Make sure that the reduction accumulator is vop[0]. */
7157 if (reduc_index == 1)
7159 gcc_assert (commutative_tree_code (code));
7160 std::swap (vop[0], vop[1]);
7162 tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
7163 vectype_in, i * ncopies + j);
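/* Build the conditional form of the reduction:
     new_temp = COND_FN (MASK, VOP[0], VOP[1], VOP[0])
   which applies CODE to the two operands in the active lanes and falls
   back to the accumulator VOP[0] in the inactive lanes, so masked-off
   lanes leave the reduction value unchanged.  */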
7164 gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
7165 vop[0], vop[1],
7166 vop[0]);
7167 new_temp = make_ssa_name (vec_dest, call);
7168 gimple_call_set_lhs (call, new_temp);
7169 gimple_call_set_nothrow (call, true);
7170 new_stmt_info = vect_finish_stmt_generation (stmt, call, gsi);
7172 else
7174 if (op_type == ternary_op)
7175 vop[2] = vec_oprnds2[i];
7177 gassign *new_stmt = gimple_build_assign (vec_dest, code,
7178 vop[0], vop[1], vop[2]);
7179 new_temp = make_ssa_name (vec_dest, new_stmt);
7180 gimple_assign_set_lhs (new_stmt, new_temp);
7181 new_stmt_info
7182 = vect_finish_stmt_generation (stmt, new_stmt, gsi);
7185 if (slp_node)
7187 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
7188 vect_defs.quick_push (new_temp);
7190 else
7191 vect_defs[0] = new_temp;
7194 if (slp_node)
7195 continue;
7197 if (j == 0)
7198 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
7199 else
7200 STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
7202 prev_stmt_info = new_stmt_info;
7205 /* Finalize the reduction-phi (set its arguments) and create the
7206 epilog reduction code. */
7207 if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
7208 vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
7210 vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_phi,
7211 epilog_copies, reduc_fn, phis,
7212 double_reduc, slp_node, slp_node_instance,
7213 cond_reduc_val, cond_reduc_op_code,
7214 neutral_op);
7216 return true;
7219 /* Function vect_min_worthwhile_factor.
7221 For a loop where we could vectorize the operation indicated by CODE,
7222 return the minimum vectorization factor that makes it worthwhile
7223 to use generic vectors. */
7224 static unsigned int
7225 vect_min_worthwhile_factor (enum tree_code code)
7227 switch (code)
7229 case PLUS_EXPR:
7230 case MINUS_EXPR:
7231 case NEGATE_EXPR:
7232 return 4;
7234 case BIT_AND_EXPR:
7235 case BIT_IOR_EXPR:
7236 case BIT_XOR_EXPR:
7237 case BIT_NOT_EXPR:
7238 return 2;
7240 default:
7241 return INT_MAX;
7245 /* Return true if VINFO indicates we are doing loop vectorization and if
7246 it is worth decomposing CODE operations into scalar operations for
7247 that loop's vectorization factor. */
7249 bool
7250 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7252 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7253 unsigned HOST_WIDE_INT value;
7254 return (loop_vinfo
7255 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7256 && value >= vect_min_worthwhile_factor (code));
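/* A minimal illustration of the two helpers above, assuming a loop_vec_info
   VINFO whose vectorization factor is a known constant: with a factor of 2,
   vect_worthwhile_without_simd_p (vinfo, PLUS_EXPR) is false because
   PLUS_EXPR needs a factor of at least 4, while
   vect_worthwhile_without_simd_p (vinfo, BIT_AND_EXPR) is true because
   bitwise codes only need a factor of 2.  */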
7259 /* Function vectorizable_induction
7261 Check if PHI performs an induction computation that can be vectorized.
7262 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7263 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7264 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
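/* A minimal example of the kind of scalar loop that gives rise to such an
   induction PHI (hypothetical source, for illustration only):

     int j = x;
     for (i = 0; i < n; i++)
       {
         a[i] = j;
         j += s;
       }

   The PHI for j has initial value X and step S, matching the
   [X, X + S, X + 2*S, X + 3*S] initial vector built in the transformation
   below for a 4-unit vector.  */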
7266 bool
7267 vectorizable_induction (gimple *phi,
7268 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7269 stmt_vec_info *vec_stmt, slp_tree slp_node,
7270 stmt_vector_for_cost *cost_vec)
7272 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
7273 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7274 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7275 unsigned ncopies;
7276 bool nested_in_vect_loop = false;
7277 struct loop *iv_loop;
7278 tree vec_def;
7279 edge pe = loop_preheader_edge (loop);
7280 basic_block new_bb;
7281 tree new_vec, vec_init, vec_step, t;
7282 tree new_name;
7283 gimple *new_stmt;
7284 gphi *induction_phi;
7285 tree induc_def, vec_dest;
7286 tree init_expr, step_expr;
7287 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7288 unsigned i;
7289 tree expr;
7290 gimple_seq stmts;
7291 imm_use_iterator imm_iter;
7292 use_operand_p use_p;
7293 gimple *exit_phi;
7294 edge latch_e;
7295 tree loop_arg;
7296 gimple_stmt_iterator si;
7297 basic_block bb = gimple_bb (phi);
7299 if (gimple_code (phi) != GIMPLE_PHI)
7300 return false;
7302 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7303 return false;
7305 /* Make sure it was recognized as induction computation. */
7306 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7307 return false;
7309 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7310 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7312 if (slp_node)
7313 ncopies = 1;
7314 else
7315 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7316 gcc_assert (ncopies >= 1);
7318 /* FORNOW. These restrictions should be relaxed. */
7319 if (nested_in_vect_loop_p (loop, phi))
7321 imm_use_iterator imm_iter;
7322 use_operand_p use_p;
7323 gimple *exit_phi;
7324 edge latch_e;
7325 tree loop_arg;
7327 if (ncopies > 1)
7329 if (dump_enabled_p ())
7330 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7331 "multiple types in nested loop.\n");
7332 return false;
7335 /* FORNOW: outer loop induction with SLP not supported. */
7336 if (STMT_SLP_TYPE (stmt_info))
7337 return false;
7339 exit_phi = NULL;
7340 latch_e = loop_latch_edge (loop->inner);
7341 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7342 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7344 gimple *use_stmt = USE_STMT (use_p);
7345 if (is_gimple_debug (use_stmt))
7346 continue;
7348 if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
7350 exit_phi = use_stmt;
7351 break;
7354 if (exit_phi)
7356 stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7357 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
7358 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
7360 if (dump_enabled_p ())
7361 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7362 "inner-loop induction only used outside "
7363 "of the outer vectorized loop.\n");
7364 return false;
7368 nested_in_vect_loop = true;
7369 iv_loop = loop->inner;
7371 else
7372 iv_loop = loop;
7373 gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
7375 if (slp_node && !nunits.is_constant ())
7377 /* The current SLP code creates the initial value element-by-element. */
7378 if (dump_enabled_p ())
7379 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7380 "SLP induction not supported for variable-length"
7381 " vectors.\n");
7382 return false;
7385 if (!vec_stmt) /* transformation not required. */
7387 STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
7388 DUMP_VECT_SCOPE ("vectorizable_induction");
7389 vect_model_induction_cost (stmt_info, ncopies, cost_vec);
7390 return true;
7393 /* Transform. */
7395 /* Compute a vector variable, initialized with the first VF values of
7396 the induction variable. E.g., for an iv with IV_PHI='X' and
7397 evolution S, for a vector of 4 units, we want to compute:
7398 [X, X + S, X + 2*S, X + 3*S]. */
7400 if (dump_enabled_p ())
7401 dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
7403 latch_e = loop_latch_edge (iv_loop);
7404 loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
7406 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
7407 gcc_assert (step_expr != NULL_TREE);
7409 pe = loop_preheader_edge (iv_loop);
7410 init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
7411 loop_preheader_edge (iv_loop));
7413 stmts = NULL;
7414 if (!nested_in_vect_loop)
7416 /* Convert the initial value to the desired type. */
7417 tree new_type = TREE_TYPE (vectype);
7418 init_expr = gimple_convert (&stmts, new_type, init_expr);
7420 /* If we are using the loop mask to "peel" for alignment then we need
7421 to adjust the start value here. */
7422 tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
7423 if (skip_niters != NULL_TREE)
7425 if (FLOAT_TYPE_P (vectype))
7426 skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
7427 skip_niters);
7428 else
7429 skip_niters = gimple_convert (&stmts, new_type, skip_niters);
7430 tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
7431 skip_niters, step_expr);
7432 init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
7433 init_expr, skip_step);
7437 /* Convert the step to the desired type. */
7438 step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
7440 if (stmts)
7442 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7443 gcc_assert (!new_bb);
7446 /* Find the first insertion point in the BB. */
7447 si = gsi_after_labels (bb);
7449 /* For SLP induction we have to generate several IVs as for example
7450 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
7451 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7452 [VF*S, VF*S, VF*S, VF*S] for all. */
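/* A worked instance of the example above, assuming 4-element vectors and
   group size 3: nivs = least_common_multiple (3, 4) / 4 = 3 initial IVs are
   built, covering 12 lanes (four complete groups).  Any further vector stmts
   are derived from those three by adding [VF'*S, VF'*S, ...] with
   VF' = least_common_multiple (3, 4) / 3 = 4, as done in the re-use code
   below.  */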
7453 if (slp_node)
7455 /* Enforced above. */
7456 unsigned int const_nunits = nunits.to_constant ();
7458 /* Generate [VF*S, VF*S, ... ]. */
7459 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7461 expr = build_int_cst (integer_type_node, vf);
7462 expr = fold_convert (TREE_TYPE (step_expr), expr);
7464 else
7465 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7466 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7467 expr, step_expr);
7468 if (! CONSTANT_CLASS_P (new_name))
7469 new_name = vect_init_vector (phi, new_name,
7470 TREE_TYPE (step_expr), NULL);
7471 new_vec = build_vector_from_val (vectype, new_name);
7472 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
7474 /* Now generate the IVs. */
7475 unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7476 unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7477 unsigned elts = const_nunits * nvects;
7478 unsigned nivs = least_common_multiple (group_size,
7479 const_nunits) / const_nunits;
7480 gcc_assert (elts % group_size == 0);
7481 tree elt = init_expr;
7482 unsigned ivn;
7483 for (ivn = 0; ivn < nivs; ++ivn)
7485 tree_vector_builder elts (vectype, const_nunits, 1);
7486 stmts = NULL;
7487 for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
7489 if (ivn*const_nunits + eltn >= group_size
7490 && (ivn * const_nunits + eltn) % group_size == 0)
7491 elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
7492 elt, step_expr);
7493 elts.quick_push (elt);
7495 vec_init = gimple_build_vector (&stmts, &elts);
7496 if (stmts)
7498 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7499 gcc_assert (!new_bb);
7502 /* Create the induction-phi that defines the induction-operand. */
7503 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7504 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7505 stmt_vec_info induction_phi_info
7506 = loop_vinfo->add_stmt (induction_phi);
7507 induc_def = PHI_RESULT (induction_phi);
7509 /* Create the iv update inside the loop */
7510 vec_def = make_ssa_name (vec_dest);
7511 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7512 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7513 loop_vinfo->add_stmt (new_stmt);
7515 /* Set the arguments of the phi node: */
7516 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7517 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7518 UNKNOWN_LOCATION);
7520 SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi_info);
7523 /* Re-use IVs when we can. */
7524 if (ivn < nvects)
7526 unsigned vfp
7527 = least_common_multiple (group_size, const_nunits) / group_size;
7528 /* Generate [VF'*S, VF'*S, ... ]. */
7529 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7531 expr = build_int_cst (integer_type_node, vfp);
7532 expr = fold_convert (TREE_TYPE (step_expr), expr);
7534 else
7535 expr = build_int_cst (TREE_TYPE (step_expr), vfp);
7536 new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
7537 expr, step_expr);
7538 if (! CONSTANT_CLASS_P (new_name))
7539 new_name = vect_init_vector (phi, new_name,
7540 TREE_TYPE (step_expr), NULL);
7541 new_vec = build_vector_from_val (vectype, new_name);
7542 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
7543 for (; ivn < nvects; ++ivn)
7545 gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
7546 tree def;
7547 if (gimple_code (iv) == GIMPLE_PHI)
7548 def = gimple_phi_result (iv);
7549 else
7550 def = gimple_assign_lhs (iv);
7551 new_stmt = gimple_build_assign (make_ssa_name (vectype),
7552 PLUS_EXPR,
7553 def, vec_step);
7554 if (gimple_code (iv) == GIMPLE_PHI)
7555 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7556 else
7558 gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
7559 gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
7561 SLP_TREE_VEC_STMTS (slp_node).quick_push
7562 (loop_vinfo->add_stmt (new_stmt));
7566 return true;
7569 /* Create the vector that holds the initial_value of the induction. */
7570 if (nested_in_vect_loop)
7572 /* iv_loop is nested in the loop to be vectorized. init_expr has already
7573 been created during vectorization of previous stmts. We obtain it
7574 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7575 vec_init = vect_get_vec_def_for_operand (init_expr, phi);
7576 /* If the initial value is not of proper type, convert it. */
7577 if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
7579 new_stmt
7580 = gimple_build_assign (vect_get_new_ssa_name (vectype,
7581 vect_simple_var,
7582 "vec_iv_"),
7583 VIEW_CONVERT_EXPR,
7584 build1 (VIEW_CONVERT_EXPR, vectype,
7585 vec_init));
7586 vec_init = gimple_assign_lhs (new_stmt);
7587 new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
7588 new_stmt);
7589 gcc_assert (!new_bb);
7590 loop_vinfo->add_stmt (new_stmt);
7593 else
7595 /* iv_loop is the loop to be vectorized. Create:
7596 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
7597 stmts = NULL;
7598 new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
7600 unsigned HOST_WIDE_INT const_nunits;
7601 if (nunits.is_constant (&const_nunits))
7603 tree_vector_builder elts (vectype, const_nunits, 1);
7604 elts.quick_push (new_name);
7605 for (i = 1; i < const_nunits; i++)
7607 /* Create: new_name_i = new_name + step_expr */
7608 new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
7609 new_name, step_expr);
7610 elts.quick_push (new_name);
7612 /* Create a vector from [new_name_0, new_name_1, ...,
7613 new_name_nunits-1] */
7614 vec_init = gimple_build_vector (&stmts, &elts);
7616 else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
7617 /* Build the initial value directly from a VEC_SERIES_EXPR. */
7618 vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
7619 new_name, step_expr);
7620 else
7622 /* Build:
7623 [base, base, base, ...]
7624 + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
7625 gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
7626 gcc_assert (flag_associative_math);
7627 tree index = build_index_vector (vectype, 0, 1);
7628 tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
7629 new_name);
7630 tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
7631 step_expr);
7632 vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
7633 vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
7634 vec_init, step_vec);
7635 vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
7636 vec_init, base_vec);
7639 if (stmts)
7641 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
7642 gcc_assert (!new_bb);
7647 /* Create the vector that holds the step of the induction. */
7648 if (nested_in_vect_loop)
7649 /* iv_loop is nested in the loop to be vectorized. Generate:
7650 vec_step = [S, S, S, S] */
7651 new_name = step_expr;
7652 else
7654 /* iv_loop is the loop to be vectorized. Generate:
7655 vec_step = [VF*S, VF*S, VF*S, VF*S] */
7656 gimple_seq seq = NULL;
7657 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7659 expr = build_int_cst (integer_type_node, vf);
7660 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7662 else
7663 expr = build_int_cst (TREE_TYPE (step_expr), vf);
7664 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7665 expr, step_expr);
7666 if (seq)
7668 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7669 gcc_assert (!new_bb);
7673 t = unshare_expr (new_name);
7674 gcc_assert (CONSTANT_CLASS_P (new_name)
7675 || TREE_CODE (new_name) == SSA_NAME);
7676 new_vec = build_vector_from_val (vectype, t);
7677 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
7680 /* Create the following def-use cycle:
7681 loop prolog:
7682 vec_init = ...
7683 vec_step = ...
7684 loop:
7685 vec_iv = PHI <vec_init, vec_loop>
7687 STMT
7689 vec_loop = vec_iv + vec_step; */
7691 /* Create the induction-phi that defines the induction-operand. */
7692 vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
7693 induction_phi = create_phi_node (vec_dest, iv_loop->header);
7694 stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
7695 induc_def = PHI_RESULT (induction_phi);
7697 /* Create the iv update inside the loop */
7698 vec_def = make_ssa_name (vec_dest);
7699 new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
7700 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7701 stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7703 /* Set the arguments of the phi node: */
7704 add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
7705 add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
7706 UNKNOWN_LOCATION);
7708 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi_info;
7710 /* In case the vectorization factor (VF) is bigger than the number
7711 of elements that we can fit in a vectype (nunits), we have to generate
7712 more than one vector stmt, i.e. we need to "unroll" the
7713 vector stmt by a factor VF/nunits. For more details see documentation
7714 in vectorizable_operation. */
7716 if (ncopies > 1)
7718 gimple_seq seq = NULL;
7719 stmt_vec_info prev_stmt_vinfo;
7720 /* FORNOW. This restriction should be relaxed. */
7721 gcc_assert (!nested_in_vect_loop);
7723 /* Create the vector that holds the step of the induction. */
7724 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
7726 expr = build_int_cst (integer_type_node, nunits);
7727 expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
7729 else
7730 expr = build_int_cst (TREE_TYPE (step_expr), nunits);
7731 new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
7732 expr, step_expr);
7733 if (seq)
7735 new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
7736 gcc_assert (!new_bb);
7739 t = unshare_expr (new_name);
7740 gcc_assert (CONSTANT_CLASS_P (new_name)
7741 || TREE_CODE (new_name) == SSA_NAME);
7742 new_vec = build_vector_from_val (vectype, t);
7743 vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
7745 vec_def = induc_def;
7746 prev_stmt_vinfo = induction_phi_info;
7747 for (i = 1; i < ncopies; i++)
7749 /* vec_i = vec_prev + vec_step */
7750 new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
7751 vec_def, vec_step);
7752 vec_def = make_ssa_name (vec_dest, new_stmt);
7753 gimple_assign_set_lhs (new_stmt, vec_def);
7755 gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
7756 new_stmt_info = loop_vinfo->add_stmt (new_stmt);
7757 STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
7758 prev_stmt_vinfo = new_stmt_info;
7762 if (nested_in_vect_loop)
7764 /* Find the loop-closed exit-phi of the induction, and record
7765 the final vector of induction results: */
7766 exit_phi = NULL;
7767 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
7769 gimple *use_stmt = USE_STMT (use_p);
7770 if (is_gimple_debug (use_stmt))
7771 continue;
7773 if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
7775 exit_phi = use_stmt;
7776 break;
7779 if (exit_phi)
7781 stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
7782 /* FORNOW. Currently not supporting the case that an inner-loop induction
7783 is not used in the outer-loop (i.e. only outside the outer-loop). */
7784 gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
7785 && !STMT_VINFO_LIVE_P (stmt_vinfo));
7787 STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt_info;
7788 if (dump_enabled_p ())
7790 dump_printf_loc (MSG_NOTE, vect_location,
7791 "vector of inductions after inner-loop:");
7792 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
7798 if (dump_enabled_p ())
7800 dump_printf_loc (MSG_NOTE, vect_location,
7801 "transform induction: created def-use cycle: ");
7802 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
7803 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
7804 SSA_NAME_DEF_STMT (vec_def), 0);
7807 return true;
7810 /* Function vectorizable_live_operation.
7812 STMT computes a value that is used outside the loop. Check if
7813 it can be supported. */
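/* A minimal example of such a live value (hypothetical source, for
   illustration only):

     for (i = 0; i < n; i++)
       last = a[i];
     use (last);

   The value of LAST from the final iteration is live after the loop and is
   extracted below from the appropriate lane of the vectorized result.  */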
7815 bool
7816 vectorizable_live_operation (gimple *stmt,
7817 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7818 slp_tree slp_node, int slp_index,
7819 stmt_vec_info *vec_stmt,
7820 stmt_vector_for_cost *)
7822 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
7823 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7824 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7825 imm_use_iterator imm_iter;
7826 tree lhs, lhs_type, bitsize, vec_bitsize;
7827 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7828 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7829 int ncopies;
7830 gimple *use_stmt;
7831 auto_vec<tree> vec_oprnds;
7832 int vec_entry = 0;
7833 poly_uint64 vec_index = 0;
7835 gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
7837 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
7838 return false;
7840 /* FORNOW. CHECKME. */
7841 if (nested_in_vect_loop_p (loop, stmt))
7842 return false;
7844 /* If STMT is not relevant and it is a simple assignment and its inputs are
7845 invariant then it can remain in place, unvectorized. The original last
7846 scalar value that it computes will be used. */
7847 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7849 gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
7850 if (dump_enabled_p ())
7851 dump_printf_loc (MSG_NOTE, vect_location,
7852 "statement is simple and uses invariant. Leaving in "
7853 "place.\n");
7854 return true;
7857 if (slp_node)
7858 ncopies = 1;
7859 else
7860 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7862 if (slp_node)
7864 gcc_assert (slp_index >= 0);
7866 int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
7867 int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
7869 /* Get the last occurrence of the scalar index from the concatenation of
7870 all the slp vectors. Calculate which slp vector it is and the index
7871 within. */
7872 poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
7874 /* Calculate which vector contains the result, and which lane of
7875 that vector we need. */
7876 if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
7878 if (dump_enabled_p ())
7879 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7880 "Cannot determine which vector holds the"
7881 " final result.\n");
7882 return false;
7886 if (!vec_stmt)
7888 /* No transformation required. */
7889 if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
7891 if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
7892 OPTIMIZE_FOR_SPEED))
7894 if (dump_enabled_p ())
7895 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7896 "can't use a fully-masked loop because "
7897 "the target doesn't support extract last "
7898 "reduction.\n");
7899 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7901 else if (slp_node)
7903 if (dump_enabled_p ())
7904 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7905 "can't use a fully-masked loop because an "
7906 "SLP statement is live after the loop.\n");
7907 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7909 else if (ncopies > 1)
7911 if (dump_enabled_p ())
7912 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
7913 "can't use a fully-masked loop because"
7914 " ncopies is greater than 1.\n");
7915 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
7917 else
7919 gcc_assert (ncopies == 1 && !slp_node);
7920 vect_record_loop_mask (loop_vinfo,
7921 &LOOP_VINFO_MASKS (loop_vinfo),
7922 1, vectype);
7925 return true;
7928 /* If stmt has a related stmt, then use that for getting the lhs. */
7929 if (is_pattern_stmt_p (stmt_info))
7930 stmt = STMT_VINFO_RELATED_STMT (stmt_info);
7932 lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
7933 : gimple_get_lhs (stmt);
7934 lhs_type = TREE_TYPE (lhs);
7936 bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
7937 ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
7938 : TYPE_SIZE (TREE_TYPE (vectype)));
7939 vec_bitsize = TYPE_SIZE (vectype);
7941 /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
7942 tree vec_lhs, bitstart;
7943 if (slp_node)
7945 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7947 /* Get the correct slp vectorized stmt. */
7948 gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]->stmt;
7949 if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
7950 vec_lhs = gimple_phi_result (phi);
7951 else
7952 vec_lhs = gimple_get_lhs (vec_stmt);
7954 /* Get entry to use. */
7955 bitstart = bitsize_int (vec_index);
7956 bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
7958 else
7960 enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
7961 vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
7962 gcc_checking_assert (ncopies == 1
7963 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
7965 /* For multiple copies, get the last copy. */
7966 for (int i = 1; i < ncopies; ++i)
7967 vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
7968 vec_lhs);
7970 /* Get the last lane in the vector. */
7971 bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
7974 gimple_seq stmts = NULL;
7975 tree new_tree;
7976 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
7978 /* Emit:
7980 SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
7982 where VEC_LHS is the vectorized live-out result and MASK is
7983 the loop mask for the final iteration. */
7984 gcc_assert (ncopies == 1 && !slp_node);
7985 tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
7986 tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
7987 1, vectype, 0);
7988 tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
7989 scalar_type, mask, vec_lhs);
7991 /* Convert the extracted vector element to the required scalar type. */
7992 new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
7994 else
7996 tree bftype = TREE_TYPE (vectype);
7997 if (VECTOR_BOOLEAN_TYPE_P (vectype))
7998 bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
7999 new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
8000 new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
8001 &stmts, true, NULL_TREE);
8004 if (stmts)
8005 gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
8007 /* Replace use of lhs with newly computed result. If the use stmt is a
8008 single arg PHI, just replace all uses of PHI result. It's necessary
8009 because the lcssa PHI defining lhs may appear before the newly inserted stmt. */
8010 use_operand_p use_p;
8011 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
8012 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
8013 && !is_gimple_debug (use_stmt))
8015 if (gimple_code (use_stmt) == GIMPLE_PHI
8016 && gimple_phi_num_args (use_stmt) == 1)
8018 replace_uses_by (gimple_phi_result (use_stmt), new_tree);
8020 else
8022 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
8023 SET_USE (use_p, new_tree);
8025 update_stmt (use_stmt);
8028 return true;
8031 /* Kill any debug uses outside LOOP of SSA names defined in STMT. */
8033 static void
8034 vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
8036 ssa_op_iter op_iter;
8037 imm_use_iterator imm_iter;
8038 def_operand_p def_p;
8039 gimple *ustmt;
8041 FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
8043 FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
8045 basic_block bb;
8047 if (!is_gimple_debug (ustmt))
8048 continue;
8050 bb = gimple_bb (ustmt);
8052 if (!flow_bb_inside_loop_p (loop, bb))
8054 if (gimple_debug_bind_p (ustmt))
8056 if (dump_enabled_p ())
8057 dump_printf_loc (MSG_NOTE, vect_location,
8058 "killing debug use\n");
8060 gimple_debug_bind_reset_value (ustmt);
8061 update_stmt (ustmt);
8063 else
8064 gcc_unreachable ();
8070 /* Given loop represented by LOOP_VINFO, return true if computation of
8071 LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
8072 otherwise. */
8074 static bool
8075 loop_niters_no_overflow (loop_vec_info loop_vinfo)
8077 /* Constant case. */
8078 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8080 tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
8081 tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
8083 gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
8084 gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
8085 if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
8086 return true;
8089 widest_int max;
8090 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8091 /* Check the upper bound of loop niters. */
8092 if (get_max_loop_iterations (loop, &max))
8094 tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
8095 signop sgn = TYPE_SIGN (type);
8096 widest_int type_max = widest_int::from (wi::max_value (type), sgn);
8097 if (max < type_max)
8098 return true;
8100 return false;
8103 /* Return a mask type with half the number of elements as TYPE. */
8105 tree
8106 vect_halve_mask_nunits (tree type)
8108 poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
8109 return build_truth_vector_type (nunits, current_vector_size);
8112 /* Return a mask type with twice as many elements as TYPE. */
8114 tree
8115 vect_double_mask_nunits (tree type)
8117 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
8118 return build_truth_vector_type (nunits, current_vector_size);
8121 /* Record that a fully-masked version of LOOP_VINFO would need MASKS to
8122 contain a sequence of NVECTORS masks that each control a vector of type
8123 VECTYPE. */
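/* For example, assuming a loop with vectorization factor 16: recording
   NVECTORS = 2 masks for a VECTYPE with 8 elements describes an rgroup with
   2 * 8 / 16 = 1 scalar per iteration, whereas recording NVECTORS = 4 masks
   for the same VECTYPE gives an rgroup with 4 * 8 / 16 = 2 scalars per
   iteration.  */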
8125 void
8126 vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
8127 unsigned int nvectors, tree vectype)
8129 gcc_assert (nvectors != 0);
8130 if (masks->length () < nvectors)
8131 masks->safe_grow_cleared (nvectors);
8132 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8133 /* The number of scalars per iteration and the number of vectors are
8134 both compile-time constants. */
8135 unsigned int nscalars_per_iter
8136 = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
8137 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
8138 if (rgm->max_nscalars_per_iter < nscalars_per_iter)
8140 rgm->max_nscalars_per_iter = nscalars_per_iter;
8141 rgm->mask_type = build_same_sized_truth_vector_type (vectype);
8145 /* Given a complete set of masks MASKS, extract mask number INDEX
8146 for an rgroup that operates on NVECTORS vectors of type VECTYPE,
8147 where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
8149 See the comment above vec_loop_masks for more details about the mask
8150 arrangement. */
8152 tree
8153 vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
8154 unsigned int nvectors, tree vectype, unsigned int index)
8156 rgroup_masks *rgm = &(*masks)[nvectors - 1];
8157 tree mask_type = rgm->mask_type;
8159 /* Populate the rgroup's mask array, if this is the first time we've
8160 used it. */
8161 if (rgm->masks.is_empty ())
8163 rgm->masks.safe_grow_cleared (nvectors);
8164 for (unsigned int i = 0; i < nvectors; ++i)
8166 tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
8167 /* Provide a dummy definition until the real one is available. */
8168 SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
8169 rgm->masks[i] = mask;
8173 tree mask = rgm->masks[index];
8174 if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
8175 TYPE_VECTOR_SUBPARTS (vectype)))
8177 /* A loop mask for data type X can be reused for data type Y
8178 if X has N times more elements than Y and if Y's elements
8179 are N times bigger than X's. In this case each sequence
8180 of N elements in the loop mask will be all-zero or all-one.
8181 We can then view-convert the mask so that each sequence of
8182 N elements is replaced by a single element. */
8183 gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
8184 TYPE_VECTOR_SUBPARTS (vectype)));
8185 gimple_seq seq = NULL;
8186 mask_type = build_same_sized_truth_vector_type (vectype);
8187 mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
8188 if (seq)
8189 gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
8191 return mask;
8194 /* Scale profiling counters by estimation for LOOP which is vectorized
8195 by factor VF. */
8197 static void
8198 scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
8200 edge preheader = loop_preheader_edge (loop);
8201 /* Reduce loop iterations by the vectorization factor. */
8202 gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
8203 profile_count freq_h = loop->header->count, freq_e = preheader->count ();
8205 if (freq_h.nonzero_p ())
8207 profile_probability p;
8209 /* Avoid dropping loop body profile counter to 0 because of zero count
8210 in loop's preheader. */
8211 if (!(freq_e == profile_count::zero ()))
8212 freq_e = freq_e.force_nonzero ();
8213 p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
8214 scale_loop_frequencies (loop, p);
8217 edge exit_e = single_exit (loop);
8218 exit_e->probability = profile_probability::always ()
8219 .apply_scale (1, new_est_niter + 1);
8221 edge exit_l = single_pred_edge (loop->latch);
8222 profile_probability prob = exit_l->probability;
8223 exit_l->probability = exit_e->probability.invert ();
8224 if (prob.initialized_p () && exit_l->probability.initialized_p ())
8225 scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
8228 /* Vectorize STMT if relevant, inserting any new instructions before GSI.
8229 When vectorizing STMT as a store, set *SEEN_STORE to its stmt_vec_info.
8230 *SLP_SCHEDULED is a running record of whether we have called
8231 vect_schedule_slp. */
8233 static void
8234 vect_transform_loop_stmt (loop_vec_info loop_vinfo, gimple *stmt,
8235 gimple_stmt_iterator *gsi,
8236 stmt_vec_info *seen_store, bool *slp_scheduled)
8238 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8239 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8240 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
8241 if (!stmt_info)
8242 return;
8244 if (dump_enabled_p ())
8246 dump_printf_loc (MSG_NOTE, vect_location,
8247 "------>vectorizing statement: ");
8248 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
8251 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8252 vect_loop_kill_debug_uses (loop, stmt);
8254 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8255 && !STMT_VINFO_LIVE_P (stmt_info))
8256 return;
8258 if (STMT_VINFO_VECTYPE (stmt_info))
8260 poly_uint64 nunits
8261 = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
8262 if (!STMT_SLP_TYPE (stmt_info)
8263 && maybe_ne (nunits, vf)
8264 && dump_enabled_p ())
8265 /* For SLP, VF is set according to the unrolling factor, and not
8266 to the vector size, hence for SLP this diagnostic is not valid. */
8267 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8270 /* SLP. Schedule all the SLP instances when the first SLP stmt is
8271 reached. */
8272 if (slp_vect_type slptype = STMT_SLP_TYPE (stmt_info))
8275 if (!*slp_scheduled)
8277 *slp_scheduled = true;
8279 DUMP_VECT_SCOPE ("scheduling SLP instances");
8281 vect_schedule_slp (loop_vinfo);
8284 /* Hybrid SLP stmts must be vectorized in addition to SLP. */
8285 if (slptype == pure_slp)
8286 return;
8289 if (dump_enabled_p ())
8290 dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
8292 bool grouped_store = false;
8293 if (vect_transform_stmt (stmt, gsi, &grouped_store, NULL, NULL))
8294 *seen_store = stmt_info;
8297 /* Function vect_transform_loop.
8299 The analysis phase has determined that the loop is vectorizable.
8300 Vectorize the loop - create vectorized stmts to replace the scalar
8301 stmts in the loop, and update the loop exit condition.
8302 Returns scalar epilogue loop if any. */
8304 struct loop *
8305 vect_transform_loop (loop_vec_info loop_vinfo)
8307 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
8308 struct loop *epilogue = NULL;
8309 basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
8310 int nbbs = loop->num_nodes;
8311 int i;
8312 tree niters_vector = NULL_TREE;
8313 tree step_vector = NULL_TREE;
8314 tree niters_vector_mult_vf = NULL_TREE;
8315 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
8316 unsigned int lowest_vf = constant_lower_bound (vf);
8317 bool slp_scheduled = false;
8318 gimple *stmt;
8319 bool check_profitability = false;
8320 unsigned int th;
8322 DUMP_VECT_SCOPE ("vec_transform_loop");
8324 loop_vinfo->shared->check_datarefs ();
8326 /* Use the more conservative vectorization threshold. If the number
8327 of iterations is constant, assume the cost check has been performed
8328 by our caller. If the threshold makes all loops profitable that
8329 run at least the (estimated) vectorization factor number of times,
8330 checking is pointless, too. */
8331 th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
8332 if (th >= vect_vf_for_cost (loop_vinfo)
8333 && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
8335 if (dump_enabled_p ())
8336 dump_printf_loc (MSG_NOTE, vect_location,
8337 "Profitability threshold is %d loop iterations.\n",
8338 th);
8339 check_profitability = true;
8342 /* Make sure there exists a single-predecessor exit bb. Do this before
8343 versioning. */
8344 edge e = single_exit (loop);
8345 if (! single_pred_p (e->dest))
8347 split_loop_exit_edge (e);
8348 if (dump_enabled_p ())
8349 dump_printf (MSG_NOTE, "split exit edge\n");
8352 /* Version the loop first, if required, so the profitability check
8353 comes first. */
8355 if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
8357 poly_uint64 versioning_threshold
8358 = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
8359 if (check_profitability
8360 && ordered_p (poly_uint64 (th), versioning_threshold))
8362 versioning_threshold = ordered_max (poly_uint64 (th),
8363 versioning_threshold);
8364 check_profitability = false;
8366 vect_loop_versioning (loop_vinfo, th, check_profitability,
8367 versioning_threshold);
8368 check_profitability = false;
8371 /* Make sure there exists a single-predecessor exit bb also on the
8372 scalar loop copy. Do this after versioning but before peeling
8373 so CFG structure is fine for both scalar and if-converted loop
8374 to make slpeel_duplicate_current_defs_from_edges face matched
8375 loop closed PHI nodes on the exit. */
8376 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8378 e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
8379 if (! single_pred_p (e->dest))
8381 split_loop_exit_edge (e);
8382 if (dump_enabled_p ())
8383 dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
8387 tree niters = vect_build_loop_niters (loop_vinfo);
8388 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
8389 tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
8390 bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
8391 epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
8392 &step_vector, &niters_vector_mult_vf, th,
8393 check_profitability, niters_no_overflow);
8395 if (niters_vector == NULL_TREE)
8397 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8398 && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8399 && known_eq (lowest_vf, vf))
8401 niters_vector
8402 = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
8403 LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
8404 step_vector = build_one_cst (TREE_TYPE (niters));
8406 else
8407 vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
8408 &step_vector, niters_no_overflow);
8411 /* 1) Make sure the loop header has exactly two entries
8412 2) Make sure we have a preheader basic block. */
8414 gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
8416 split_edge (loop_preheader_edge (loop));
8418 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
8419 && vect_use_loop_mask_for_alignment_p (loop_vinfo))
8420 /* This will deal with any possible peeling. */
8421 vect_prepare_for_masked_peels (loop_vinfo);
8423 /* FORNOW: the vectorizer supports only loops whose body consists
8424 of one basic block (header + empty latch). When the vectorizer
8425 supports more involved loop forms, the order in which the BBs are
8426 traversed needs to be reconsidered. */
8428 for (i = 0; i < nbbs; i++)
8430 basic_block bb = bbs[i];
8431 stmt_vec_info stmt_info;
8433 for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
8434 gsi_next (&si))
8436 gphi *phi = si.phi ();
8437 if (dump_enabled_p ())
8439 dump_printf_loc (MSG_NOTE, vect_location,
8440 "------>vectorizing phi: ");
8441 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
8443 stmt_info = loop_vinfo->lookup_stmt (phi);
8444 if (!stmt_info)
8445 continue;
8447 if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
8448 vect_loop_kill_debug_uses (loop, phi);
8450 if (!STMT_VINFO_RELEVANT_P (stmt_info)
8451 && !STMT_VINFO_LIVE_P (stmt_info))
8452 continue;
8454 if (STMT_VINFO_VECTYPE (stmt_info)
8455 && (maybe_ne
8456 (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
8457 && dump_enabled_p ())
8458 dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
8460 if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
8461 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
8462 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
8463 && ! PURE_SLP_STMT (stmt_info))
8465 if (dump_enabled_p ())
8466 dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
8467 vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
8471 for (gimple_stmt_iterator si = gsi_start_bb (bb);
8472 !gsi_end_p (si);)
8474 stmt = gsi_stmt (si);
8475 /* During vectorization remove existing clobber stmts. */
8476 if (gimple_clobber_p (stmt))
8478 unlink_stmt_vdef (stmt);
8479 gsi_remove (&si, true);
8480 release_defs (stmt);
8482 else
8484 stmt_info = loop_vinfo->lookup_stmt (stmt);
8486 /* vector stmts created in the outer-loop during vectorization of
8487 stmts in an inner-loop may not have a stmt_info, and do not
8488 need to be vectorized. */
8489 stmt_vec_info seen_store = NULL;
8490 if (stmt_info)
8492 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
8494 gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
8495 for (gimple_stmt_iterator subsi = gsi_start (def_seq);
8496 !gsi_end_p (subsi); gsi_next (&subsi))
8497 vect_transform_loop_stmt (loop_vinfo,
8498 gsi_stmt (subsi), &si,
8499 &seen_store,
8500 &slp_scheduled);
8501 gimple *pat_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
8502 vect_transform_loop_stmt (loop_vinfo, pat_stmt, &si,
8503 &seen_store, &slp_scheduled);
8505 vect_transform_loop_stmt (loop_vinfo, stmt, &si,
8506 &seen_store, &slp_scheduled);
8508 if (seen_store)
8510 if (STMT_VINFO_GROUPED_ACCESS (seen_store))
8512 /* Interleaving. The vectorization of the
8513 interleaving chain was completed - free
8514 all the stores in the chain. */
8515 gsi_next (&si);
8516 vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
8518 else
8520 /* Free the attached stmt_vec_info and remove the
8521 stmt. */
8522 free_stmt_vec_info (stmt);
8523 unlink_stmt_vdef (stmt);
8524 gsi_remove (&si, true);
8525 release_defs (stmt);
8528 else
8529 gsi_next (&si);
8533 /* Stub out scalar statements that must not survive vectorization.
8534 Doing this here helps with grouped statements, or statements that
8535 are involved in patterns. */
8536 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
8537 !gsi_end_p (gsi); gsi_next (&gsi))
8539 gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
8540 if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
8542 tree lhs = gimple_get_lhs (call);
8543 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8545 tree zero = build_zero_cst (TREE_TYPE (lhs));
8546 gimple *new_stmt = gimple_build_assign (lhs, zero);
8547 gsi_replace (&gsi, new_stmt, true);
8551 } /* BBs in loop */
8553 /* The vectorization factor is always > 1, so if we use an IV increment of 1,
8554 a zero NITERS becomes a nonzero NITERS_VECTOR. */
8555 if (integer_onep (step_vector))
8556 niters_no_overflow = true;
8557 vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
8558 niters_vector_mult_vf, !niters_no_overflow);
8560 unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
8561 scale_profile_for_vect_loop (loop, assumed_vf);
8563 /* True if the final iteration might not handle a full vector's
8564 worth of scalar iterations. */
8565 bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
8566 /* The minimum number of iterations performed by the epilogue. This
8567 is 1 when peeling for gaps because we always need a final scalar
8568 iteration. */
8569 int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
8570 /* +1 to convert latch counts to loop iteration counts,
8571 -min_epilogue_iters to remove iterations that cannot be performed
8572 by the vector code. */
8573 int bias_for_lowest = 1 - min_epilogue_iters;
8574 int bias_for_assumed = bias_for_lowest;
8575 int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
8576 if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
8578 /* When the amount of peeling is known at compile time, the first
8579 iteration will have exactly alignment_npeels active elements.
8580 In the worst case it will have at least one. */
8581 int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
8582 bias_for_lowest += lowest_vf - min_first_active;
8583 bias_for_assumed += assumed_vf - min_first_active;
8585 /* In these calculations the "- 1" converts loop iteration counts
8586 back to latch counts. */
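/* For instance, assuming lowest_vf == 4, no peeling for gaps and no full
   masking: a latch count upper bound of 10 means at most 11 scalar
   iterations, the vector loop then runs at most floor (11 / 4) = 2 times,
   and its latch count upper bound becomes 2 - 1 = 1.  */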
8587 if (loop->any_upper_bound)
8588 loop->nb_iterations_upper_bound
8589 = (final_iter_may_be_partial
8590 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
8591 lowest_vf) - 1
8592 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
8593 lowest_vf) - 1);
8594 if (loop->any_likely_upper_bound)
8595 loop->nb_iterations_likely_upper_bound
8596 = (final_iter_may_be_partial
8597 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
8598 + bias_for_lowest, lowest_vf) - 1
8599 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
8600 + bias_for_lowest, lowest_vf) - 1);
8601 if (loop->any_estimate)
8602 loop->nb_iterations_estimate
8603 = (final_iter_may_be_partial
8604 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
8605 assumed_vf) - 1
8606 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
8607 assumed_vf) - 1);
8609 if (dump_enabled_p ())
8611 if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8613 dump_printf_loc (MSG_NOTE, vect_location,
8614 "LOOP VECTORIZED\n");
8615 if (loop->inner)
8616 dump_printf_loc (MSG_NOTE, vect_location,
8617 "OUTER LOOP VECTORIZED\n");
8618 dump_printf (MSG_NOTE, "\n");
8620 else
8622 dump_printf_loc (MSG_NOTE, vect_location,
8623 "LOOP EPILOGUE VECTORIZED (VS=");
8624 dump_dec (MSG_NOTE, current_vector_size);
8625 dump_printf (MSG_NOTE, ")\n");
8629 /* Free SLP instances here because otherwise stmt reference counting
8630 won't work. */
8631 slp_instance instance;
8632 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
8633 vect_free_slp_instance (instance, true);
8634 LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
8635 /* Clear the safelen field since its value is invalid after vectorization,
8636 as the vectorized loop can have loop-carried dependencies. */
8637 loop->safelen = 0;
8639 /* Don't vectorize epilogue for epilogue. */
8640 if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
8641 epilogue = NULL;
8643 if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
8644 epilogue = NULL;
8646 if (epilogue)
8648 auto_vector_sizes vector_sizes;
8649 targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
8650 unsigned int next_size = 0;
8652 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
8653 && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
8654 && known_eq (vf, lowest_vf))
8656 unsigned int eiters
8657 = (LOOP_VINFO_INT_NITERS (loop_vinfo)
8658 - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
8659 eiters = eiters % lowest_vf;
8660 epilogue->nb_iterations_upper_bound = eiters - 1;
8662 unsigned int ratio;
8663 while (next_size < vector_sizes.length ()
8664 && !(constant_multiple_p (current_vector_size,
8665 vector_sizes[next_size], &ratio)
8666 && eiters >= lowest_vf / ratio))
8667 next_size += 1;
8669 else
8670 while (next_size < vector_sizes.length ()
8671 && maybe_lt (current_vector_size, vector_sizes[next_size]))
8672 next_size += 1;
8674 if (next_size == vector_sizes.length ())
8675 epilogue = NULL;
8678 if (epilogue)
8680 epilogue->force_vectorize = loop->force_vectorize;
8681 epilogue->safelen = loop->safelen;
8682 epilogue->dont_vectorize = false;
8684 /* We may need to if-convert epilogue to vectorize it. */
8685 if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
8686 tree_if_conversion (epilogue);
8689 return epilogue;
8692 /* The code below is trying to perform a simple optimization - revert
8693 if-conversion for masked stores, i.e. if the mask of a store is zero,
8694 do not perform the store, and if possible also skip the producers of the stored values.
8695 For example,
8696 for (i=0; i<n; i++)
8697 if (c[i])
8699 p1[i] += 1;
8700 p2[i] = p3[i] +2;
8702 this transformation will produce the following semi-hammock:
8704 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
8706 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
8707 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
8708 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
8709 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
8710 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
8711 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
8715 void
8716 optimize_mask_stores (struct loop *loop)
8718 basic_block *bbs = get_loop_body (loop);
8719 unsigned nbbs = loop->num_nodes;
8720 unsigned i;
8721 basic_block bb;
8722 struct loop *bb_loop;
8723 gimple_stmt_iterator gsi;
8724 gimple *stmt;
8725 auto_vec<gimple *> worklist;
8727 vect_location = find_loop_location (loop);
8728 /* Pick up all masked stores in loop if any. */
8729 for (i = 0; i < nbbs; i++)
8731 bb = bbs[i];
8732 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
8733 gsi_next (&gsi))
8735 stmt = gsi_stmt (gsi);
8736 if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
8737 worklist.safe_push (stmt);
8741 free (bbs);
8742 if (worklist.is_empty ())
8743 return;
8745 /* Loop has masked stores. */
8746 while (!worklist.is_empty ())
8748 gimple *last, *last_store;
8749 edge e, efalse;
8750 tree mask;
8751 basic_block store_bb, join_bb;
8752 gimple_stmt_iterator gsi_to;
8753 tree vdef, new_vdef;
8754 gphi *phi;
8755 tree vectype;
8756 tree zero;
8758 last = worklist.pop ();
8759 mask = gimple_call_arg (last, 2);
8760 bb = gimple_bb (last);
8761 /* Create then_bb and the if-then structure in the CFG; then_bb belongs to
8762 the same loop as if_bb. It could be different from LOOP when a
8763 two-level loop nest is vectorized and the mask_store belongs to the
8764 inner one. */
8765 e = split_block (bb, last);
8766 bb_loop = bb->loop_father;
8767 gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
8768 join_bb = e->dest;
8769 store_bb = create_empty_bb (bb);
8770 add_bb_to_loop (store_bb, bb_loop);
8771 e->flags = EDGE_TRUE_VALUE;
8772 efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
8773 /* Mark the edge into STORE_BB as unlikely. */
8774 efalse->probability = profile_probability::unlikely ();
8775 store_bb->count = efalse->count ();
8776 make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
8777 if (dom_info_available_p (CDI_DOMINATORS))
8778 set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
8779 if (dump_enabled_p ())
8780 dump_printf_loc (MSG_NOTE, vect_location,
8781 "Create new block %d to sink mask stores.",
8782 store_bb->index);
8783 /* Create vector comparison with boolean result. */
8784 vectype = TREE_TYPE (mask);
8785 zero = build_zero_cst (vectype);
8786 stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
8787 gsi = gsi_last_bb (bb);
8788 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
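/* The TRUE edge of the comparison above (mask all zero) bypasses
   STORE_BB and goes straight to JOIN_BB; the FALSE edge leads into
   STORE_BB, which then falls through to JOIN_BB.  */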
8789 /* Create a new PHI node for the vdef of the last masked store:
8790 .MEM_2 = VDEF <.MEM_1>
8791 will be converted to
8792 .MEM_3 = VDEF <.MEM_1>
8793 and a new PHI node will be created in the join bb
8794 .MEM_2 = PHI <.MEM_1, .MEM_3>
8795 */
8796 vdef = gimple_vdef (last);
8797 new_vdef = make_ssa_name (gimple_vop (cfun), last);
8798 gimple_set_vdef (last, new_vdef);
8799 phi = create_phi_node (vdef, join_bb);
8800 add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
8802 /* Put all masked stores with the same mask to STORE_BB if possible. */
8803 while (true)
8804 {
8805 gimple_stmt_iterator gsi_from;
8806 gimple *stmt1 = NULL;
8808 /* Move masked store to STORE_BB. */
8809 last_store = last;
8810 gsi = gsi_for_stmt (last);
8811 gsi_from = gsi;
8812 /* Shift GSI to the previous stmt for further traversal. */
8813 gsi_prev (&gsi);
8814 gsi_to = gsi_start_bb (store_bb);
8815 gsi_move_before (&gsi_from, &gsi_to);
8816 /* Set GSI_TO to the start of the now non-empty STORE_BB. */
8817 gsi_to = gsi_start_bb (store_bb);
8818 if (dump_enabled_p ())
8819 {
8820 dump_printf_loc (MSG_NOTE, vect_location,
8821 "Move stmt to created bb\n");
8822 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
8823 }
8824 /* Move all stored value producers if possible. */
8825 while (!gsi_end_p (gsi))
8827 tree lhs;
8828 imm_use_iterator imm_iter;
8829 use_operand_p use_p;
8830 bool res;
8832 /* Skip debug statements. */
8833 if (is_gimple_debug (gsi_stmt (gsi)))
8834 {
8835 gsi_prev (&gsi);
8836 continue;
8837 }
8838 stmt1 = gsi_stmt (gsi);
8839 /* Do not consider statements writing to memory or having
8840 a volatile operand. */
8841 if (gimple_vdef (stmt1)
8842 || gimple_has_volatile_ops (stmt1))
8843 break;
8844 gsi_from = gsi;
8845 gsi_prev (&gsi);
8846 lhs = gimple_get_lhs (stmt1);
8847 if (!lhs)
8848 break;
8850 /* LHS of vectorized stmt must be SSA_NAME. */
8851 if (TREE_CODE (lhs) != SSA_NAME)
8852 break;
8854 if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
8855 {
8856 /* Remove dead scalar statement. */
8857 if (has_zero_uses (lhs))
8858 {
8859 gsi_remove (&gsi_from, true);
8860 continue;
8861 }
8862 break;
8863 }
8864 /* Check that LHS does not have uses outside of STORE_BB. */
8865 res = true;
8866 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
8867 {
8868 gimple *use_stmt;
8869 use_stmt = USE_STMT (use_p);
8870 if (is_gimple_debug (use_stmt))
8871 continue;
8872 if (gimple_bb (use_stmt) != store_bb)
8873 {
8874 res = false;
8875 break;
8876 }
8877 }
8878 if (!res)
8879 break;
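/* Only sink STMT1 if its memory reads see the same memory state
   (virtual use) as the store just sunk; this guarantees no other
   memory write intervenes between them.  */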
8881 if (gimple_vuse (stmt1)
8882 && gimple_vuse (stmt1) != gimple_vuse (last_store))
8883 break;
8885 /* Can move STMT1 to STORE_BB. */
8886 if (dump_enabled_p ())
8887 {
8888 dump_printf_loc (MSG_NOTE, vect_location,
8889 "Move stmt to created bb\n");
8890 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
8891 }
8892 gsi_move_before (&gsi_from, &gsi_to);
8893 /* Shift GSI_TO for further insertion. */
8894 gsi_prev (&gsi_to);
8895 }
8896 /* Put other masked stores with the same mask to STORE_BB. */
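/* The next store on the worklist must use the same mask and be the very
   statement at which the scan above stopped; otherwise it starts a new
   group with its own STORE_BB on a later iteration of the outer loop.  */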
8897 if (worklist.is_empty ()
8898 || gimple_call_arg (worklist.last (), 2) != mask
8899 || worklist.last () != stmt1)
8900 break;
8901 last = worklist.pop ();
8902 }
8903 add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);