/* Loop Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "gimple-fold.h"
#include "tree-if-conv.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

	short a[N]; short b[N]; short c[N]; int i;

	for (i=0; i<N; i++){
	  a[i] = b[i] + c[i];
	}

   as if it was manually vectorized by rewriting the source code into:

	typedef int __attribute__((mode(V8HI))) v8hi;
	short a[N]; short b[N]; short c[N]; int i;
	v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;

	for (i=0; i<N/8; i++){
	  pa[i] = pb[i] + pc[i];
	}

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
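
   For instance (an illustrative example), the load of b[i] below has the
   simple consecutive pattern that is supported, whereas the strided access
   b[2*i] does not:

	for (i=0; i<N; i++)
	  a[i] = b[i];      <-- consecutive access, supported

	for (i=0; i<N; i++)
	  a[i] = b[2*i];    <-- strided, not a simple access pattern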
   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.
   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of following
   stmts which use the def of stmt S.  Stmt S is removed if it writes to memory;
   otherwise, we rely on dead code elimination for removing it.
   For example, say stmt S1 was vectorized into stmt VS1:

	VS1: vb = px[i];
	S1:  b = x[i];		STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
	S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

	VS1: vb = px[i];
	S1:  b = x[i];		STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
	VS2: va = vb;
	S2:  a = b;		STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.
   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors, for now will need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.
   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g. optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.
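
   Such a check looks roughly as follows (an illustrative sketch only; the
   pass performs it through its own helper routines):

	if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
	  return false;   <-- no target support, the stmt is not vectorized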
   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Subroutine of vect_determine_vf_for_stmt that handles only one
   statement.  VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
   may already be set for general statements (not just data refs).  */

static bool
vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
			      bool vectype_maybe_set_p,
			      poly_uint64 *vf,
			      vec<stmt_vec_info> *mask_producers)
{
  gimple *stmt = stmt_info->stmt;

  if ((!STMT_VINFO_RELEVANT_P (stmt_info)
       && !STMT_VINFO_LIVE_P (stmt_info))
      || gimple_clobber_p (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
      return true;
    }

  tree stmt_vectype, nunits_vectype;
  if (!vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
				       &nunits_vectype))
    return false;

  if (stmt_vectype)
    {
      if (STMT_VINFO_VECTYPE (stmt_info))
	/* The only case when a vectype had been already set is for stmts
	   that contain a data ref, or for "pattern-stmts" (stmts generated
	   by the vectorizer to represent/replace a certain idiom).  */
	gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
		     || vectype_maybe_set_p)
		    && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
      else if (stmt_vectype == boolean_type_node)
	mask_producers->safe_push (stmt_info);
      else
	STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
    }

  if (nunits_vectype)
    vect_update_max_nunits (vf, nunits_vectype);

  return true;
}
/* Subroutine of vect_determine_vectorization_factor.  Set the vector
   types of STMT_INFO and all attached pattern statements and update
   the vectorization factor VF accordingly.  If some of the statements
   produce a mask result whose vector type can only be calculated later,
   add them to MASK_PRODUCERS.  Return true on success or false if
   something prevented vectorization.  */

static bool
vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
			    vec<stmt_vec_info> *mask_producers)
{
  vec_info *vinfo = stmt_info->vinfo;
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }
  if (!vect_determine_vf_for_stmt_1 (stmt_info, false, vf, mask_producers))
    return false;

  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
      && STMT_VINFO_RELATED_STMT (stmt_info))
    {
      gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
      stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);

      /* If a pattern statement has def stmts, analyze them too.  */
      for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
	   !gsi_end_p (si); gsi_next (&si))
	{
	  stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern def stmt: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
				def_stmt_info->stmt, 0);
	    }
	  if (!vect_determine_vf_for_stmt_1 (def_stmt_info, true,
					     vf, mask_producers))
	    return false;
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
	}
      if (!vect_determine_vf_for_stmt_1 (stmt_info, true, vf, mask_producers))
	return false;
    }

  return true;
}
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4byte elements,
   on a target with vector size (VS) 16byte, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
	for (i=0; i<N; i++){
	  a[i] = b[i] + c[i];
	}

   vectorized loop:
	for (i=0; i<N; i+=VF){
	  a[i:VF] = b[i:VF] + c[i:VF];
	}
*/
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned nbbs = loop->num_nodes;
  poly_uint64 vectorization_factor = 1;
  tree scalar_type = NULL_TREE;
  gphi *phi;
  tree vectype;
  stmt_vec_info stmt_info;
  unsigned i;
  auto_vec<stmt_vec_info> mask_producers;

  DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  phi = si.phi ();
	  stmt_info = loop_vinfo->lookup_stmt (phi);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  gcc_assert (stmt_info);

	  if (STMT_VINFO_RELEVANT_P (stmt_info)
	      || STMT_VINFO_LIVE_P (stmt_info))
	    {
	      gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
	      scalar_type = TREE_TYPE (PHI_RESULT (phi));

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "get vectype for scalar type: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
		  dump_printf (MSG_NOTE, "\n");
		}

	      vectype = get_vectype_for_scalar_type (scalar_type);
	      if (!vectype)
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "not vectorized: unsupported "
				       "data-type ");
		      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					 scalar_type);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  return false;
		}
	      STMT_VINFO_VECTYPE (stmt_info) = vectype;

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
		  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
		  dump_printf (MSG_NOTE, "\n");
		}

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
		  dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
		  dump_printf (MSG_NOTE, "\n");
		}

	      vect_update_max_nunits (&vectorization_factor, vectype);
	    }
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (!vect_determine_vf_for_stmt (stmt_info, &vectorization_factor,
					   &mask_producers))
	    return false;
	}
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, "\n");
    }

  if (known_le (vectorization_factor, 1U))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  for (i = 0; i < mask_producers.length (); i++)
    {
      stmt_info = mask_producers[i];
      tree mask_type = vect_get_mask_type_for_stmt (stmt_info);
      if (!mask_type)
	return false;
      STMT_VINFO_VECTYPE (stmt_info) = mask_type;
    }

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */
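
/* For instance (an illustrative example): for a counter that starts at INIT
   and is incremented by STEP on every iteration of loop number 1, scev
   describes the evolution with the polynomial chrec {INIT, +, STEP}_1, whose
   evolution part in loop 1 is simply STEP.  */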
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
			     tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ",  init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
	  || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
	      && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
	  || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
	      && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
		  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
	  || !flag_associative_math))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "step unknown.\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<gimple *, 64> worklist;
  gphi_iterator gsi;
  bool double_reduc;

  DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      /* Skip virtual phi's.  The data dependences that are associated with
	 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
	continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
	{
	  STRIP_NOPS (access_fn);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "Access function of PHI: ");
	      dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
	      dump_printf (MSG_NOTE, "\n");
	    }
	  STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
	    = initial_condition_in_loop_num (access_fn, loop->num);
	  STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
	    = evolution_part_in_loop_num (access_fn, loop->num);
	}

      if (!access_fn
	  || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
	  || (LOOP_VINFO_LOOP (loop_vinfo) != loop
	      && TREE_CODE (step) != INTEGER_CST))
	{
	  worklist.safe_push (phi);
	  continue;
	}

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
		  != NULL_TREE);
      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }

  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple *phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple *reduc_stmt;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      gcc_assert (!virtual_operand_p (def)
		  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
						&double_reduc, false);
      if (reduc_stmt)
	{
	  if (double_reduc)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "Detected double reduction.\n");

	      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
	      STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
						    vect_double_reduction_def;
	    }
	  else
	    {
	      if (loop != LOOP_VINFO_LOOP (loop_vinfo))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "Detected vectorizable nested cycle.\n");

		  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
		  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
							     vect_nested_cycle;
		}
	      else
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "Detected reduction.\n");

		  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
		  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
							   vect_reduction_def;
		  /* Store the reduction cycles for possible vectorization in
		     loop-aware SLP if it was not detected as reduction
		     chain.  */
		  if (! REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
		    LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
		}
	    }
	}
      else
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Unknown def-use cycle pattern.\n");
    }
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

	      for (i=0; i<N; i++)
		sum += a[i];

   Example2: induction:

	      for (i=0; i<N; i++)
		a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
	scalar loop, so we can't change the order of computation when
	vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
	current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Transfer group and reduction information from STMT to its pattern stmt.  */

static void
vect_fixup_reduc_chain (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
  stmt_vec_info stmtp;
  gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
	      && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
  REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
  do
    {
      stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
      REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
      stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      if (stmt)
	REDUC_GROUP_NEXT_ELEMENT (stmtp)
	  = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
    }
  while (stmt);
  STMT_VINFO_DEF_TYPE (stmtp) = vect_reduction_def;
}
/* Fixup scalar cycles that now have their stmts detected as patterns.  */

static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
  gimple *first;
  unsigned i;

  FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
    if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
      {
	gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
	while (next)
	  {
	    if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
	      break;
	    next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	  }
	/* If not all stmts in the chain are patterns, try to handle
	   the chain without patterns.  */
	if (! next)
	  {
	    vect_fixup_reduc_chain (first);
	    LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
	      = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
	  }
      }
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
   niter information holds in ASSUMPTIONS.

   Return the loop exit condition.  */
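
/* A typical use looks roughly like this (an illustrative sketch;
   vect_analyze_loop_form_1 below does essentially the same):

	tree assumptions, niter, niterm1;
	gcond *cond = vect_get_loop_niters (loop, &assumptions,
					    &niter, &niterm1);

   NITER then holds the number of header executions and NITERM1 the number
   of latch executions, valid under ASSUMPTIONS.  */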
static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
		      tree *number_of_iterations, tree *number_of_iterationsm1)
{
  edge exit = single_exit (loop);
  struct tree_niter_desc niter_desc;
  tree niter_assumptions, niter, may_be_zero;
  gcond *cond = get_loop_exit_condition (loop);

  *assumptions = boolean_true_node;
  *number_of_iterationsm1 = chrec_dont_know;
  *number_of_iterations = chrec_dont_know;
  DUMP_VECT_SCOPE ("get_loop_niters");

  if (!exit)
    return cond;

  niter = chrec_dont_know;
  may_be_zero = NULL_TREE;
  niter_assumptions = boolean_true_node;
  if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
      || chrec_contains_undetermined (niter_desc.niter))
    return cond;

  niter_assumptions = niter_desc.assumptions;
  may_be_zero = niter_desc.may_be_zero;
  niter = niter_desc.niter;

  if (may_be_zero && integer_zerop (may_be_zero))
    may_be_zero = NULL_TREE;

  if (may_be_zero)
    {
      if (COMPARISON_CLASS_P (may_be_zero))
	{
	  /* Try to combine may_be_zero with assumptions, this can simplify
	     computation of niter expression.  */
	  if (niter_assumptions && !integer_nonzerop (niter_assumptions))
	    niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
					     niter_assumptions,
					     fold_build1 (TRUTH_NOT_EXPR,
							  boolean_type_node,
							  may_be_zero));
	  else
	    niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
				 build_int_cst (TREE_TYPE (niter), 0),
				 rewrite_to_non_trapping_overflow (niter));

	  may_be_zero = NULL_TREE;
	}
      else if (integer_nonzerop (may_be_zero))
	{
	  *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
	  *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
	  return cond;
	}
      else
	return cond;
    }

  *assumptions = niter_assumptions;
  *number_of_iterationsm1 = niter;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ??? For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niter && !chrec_contains_undetermined (niter))
    niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
			 build_int_cst (TREE_TYPE (niter), 1));
  *number_of_iterations = niter;

  return cond;
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
   stmt_vec_info structs for all the stmts in LOOP_IN.  */

_loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
  : vec_info (vec_info::loop, init_cost (loop_in), shared),
    loop (loop_in),
    bbs (XCNEWVEC (basic_block, loop->num_nodes)),
    num_itersm1 (NULL_TREE),
    num_iters (NULL_TREE),
    num_iters_unchanged (NULL_TREE),
    num_iters_assumptions (NULL_TREE),
    versioning_threshold (0),
    vectorization_factor (0),
    max_vectorization_factor (0),
    mask_skip_niters (NULL_TREE),
    mask_compare_type (NULL_TREE),
    peeling_for_alignment (0),
    slp_unrolling_factor (1),
    single_scalar_iteration_cost (0),
    vectorizable (false),
    can_fully_mask_p (true),
    fully_masked_p (false),
    peeling_for_gaps (false),
    peeling_for_niter (false),
    operands_swapped (false),
    no_data_dependencies (false),
    has_mask_store (false),
    orig_loop_info (NULL)
{
  /* Create/Update stmt_info for all stmts in the loop.  */
  basic_block *body = get_loop_body (loop);
  for (unsigned int i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = body[i];
      gimple_stmt_iterator si;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *phi = gsi_stmt (si);
	  gimple_set_uid (phi, 0);
	  add_stmt (phi);
	}

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  gimple_set_uid (stmt, 0);
	  add_stmt (stmt);
	}
    }
  free (body);

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
					  bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);
}
/* Free all levels of MASKS.  */

void
release_vec_loop_masks (vec_loop_masks *masks)
{
  rgroup_masks *rgm;
  unsigned int i;
  FOR_EACH_VEC_ELT (*masks, i, rgm)
    rgm->masks.release ();
  masks->release ();
}
/* Free all memory used by the _loop_vec_info, as well as all the
   stmt_vec_info structs of all the stmts in the loop.  */

_loop_vec_info::~_loop_vec_info ()
{
  int nbbs;
  gimple_stmt_iterator si;
  int j;

  /* ??? We're releasing loop_vinfos en-block.  */
  set_stmt_vec_info_vec (&stmt_vec_infos);
  nbbs = loop->num_nodes;
  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
	{
	  gimple *stmt = gsi_stmt (si);

	  /* We may have broken canonical form by moving a constant
	     into RHS1 of a commutative op.  Fix such occurrences.  */
	  if (operands_swapped && is_gimple_assign (stmt))
	    {
	      enum tree_code code = gimple_assign_rhs_code (stmt);

	      if ((code == PLUS_EXPR
		   || code == POINTER_PLUS_EXPR
		   || code == MULT_EXPR)
		  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
		swap_ssa_operands (stmt,
				   gimple_assign_rhs1_ptr (stmt),
				   gimple_assign_rhs2_ptr (stmt));
	      else if (code == COND_EXPR
		       && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
		{
		  tree cond_expr = gimple_assign_rhs1 (stmt);
		  enum tree_code cond_code = TREE_CODE (cond_expr);

		  if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
		    {
		      bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
								  0));
		      cond_code = invert_tree_comparison (cond_code,
							  honor_nans);
		      if (cond_code != ERROR_MARK)
			{
			  TREE_SET_CODE (cond_expr, cond_code);
			  swap_ssa_operands (stmt,
					     gimple_assign_rhs2_ptr (stmt),
					     gimple_assign_rhs3_ptr (stmt));
			}
		    }
		}
	    }

	  /* Free stmt_vec_info.  */
	  free_stmt_vec_info (stmt);
	  gsi_next (&si);
	}
    }

  free (bbs);

  release_vec_loop_masks (&masks);
  delete ivexpr_map;

  loop->aux = NULL;
}
/* Return an invariant or register for EXPR and emit necessary
   computations in the LOOP_VINFO loop preheader.  */
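
/* For example (an illustrative sketch only), a caller that needs the value
   of a loop-invariant expression EXPR as an operand can do

	tree val = cse_and_gimplify_to_preheader (loop_vinfo, expr);

   and reuse VAL; repeated requests for the same expression return the
   SSA name cached in loop_vinfo->ivexpr_map.  */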
tree
cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
{
  if (is_gimple_reg (expr)
      || is_gimple_min_invariant (expr))
    return expr;

  if (! loop_vinfo->ivexpr_map)
    loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
  tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
  if (! cached)
    {
      gimple_seq stmts = NULL;
      cached = force_gimple_operand (unshare_expr (expr),
				     &stmts, true, NULL_TREE);
      if (stmts)
	{
	  edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
	  gsi_insert_seq_on_edge_immediate (e, stmts);
	}
    }
  return cached;
}
/* Return true if we can use CMP_TYPE as the comparison type to produce
   all masks required to mask LOOP_VINFO.  */

static bool
can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
{
  rgroup_masks *rgm;
  unsigned int i;
  FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
    if (rgm->mask_type != NULL_TREE
	&& !direct_internal_fn_supported_p (IFN_WHILE_ULT,
					    cmp_type, rgm->mask_type,
					    OPTIMIZE_FOR_SPEED))
      return false;
  return true;
}
/* Calculate the maximum number of scalars per iteration for every
   rgroup in LOOP_VINFO.  */

static unsigned int
vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
{
  unsigned int res = 1;
  unsigned int i;
  rgroup_masks *rgm;
  FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
    res = MAX (res, rgm->max_nscalars_per_iter);
  return res;
}
/* Each statement in LOOP_VINFO can be masked where necessary.  Check
   whether we can actually generate the masks required.  Return true if so,
   storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE.  */
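
/* Conceptually (an illustrative sketch, not the exact code generated later),
   a fully-masked loop guards every lane with a mask derived from a scalar IV
   of the chosen comparison type:

	mask = WHILE_ULT (i, niters);     lane j is active iff i + j < niters
	x = .MASK_LOAD (ptr, align, mask);
	...
	i += nunits;

   so the comparison type must be wide enough to hold the iteration counts
   computed below.  */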
static bool
vect_verify_full_masking (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned int min_ni_width;

  /* Use a normal loop if there are no statements that need masking.
     This only happens in rare degenerate cases: it means that the loop
     has no loads, no stores, and no live-out values.  */
  if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
    return false;

  /* Get the maximum number of iterations that is representable
     in the counter type.  */
  tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
  widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;

  /* Get a more refined estimate for the number of iterations.  */
  widest_int max_back_edges;
  if (max_loop_iterations (loop, &max_back_edges))
    max_ni = wi::smin (max_ni, max_back_edges + 1);

  /* Account for rgroup masks, in which each bit is replicated N times.  */
  max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);

  /* Work out how many bits we need to represent the limit.  */
  min_ni_width = wi::min_precision (max_ni, UNSIGNED);

  /* Find a scalar mode for which WHILE_ULT is supported.  */
  opt_scalar_int_mode cmp_mode_iter;
  tree cmp_type = NULL_TREE;
  FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
    {
      unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
      if (cmp_bits >= min_ni_width
	  && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
	{
	  tree this_type = build_nonstandard_integer_type (cmp_bits, true);
	  if (this_type
	      && can_produce_all_loop_masks_p (loop_vinfo, this_type))
	    {
	      /* Although we could stop as soon as we find a valid mode,
		 it's often better to continue until we hit Pmode, since the
		 operands to the WHILE are more likely to be reusable in
		 address calculations.  */
	      cmp_type = this_type;
	      if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
		break;
	    }
	}
    }

  if (!cmp_type)
    return false;

  LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
  return true;
}
/* Calculate the cost of one scalar iteration of the loop.  */

static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor;
  int innerloop_iters, i;

  /* Gather costs for statements in the scalar loop.  */

  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
	factor = innerloop_iters;
      else
	factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);

	  if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
	    continue;

	  /* Skip stmts that are not vectorized inside the loop.  */
	  if (stmt_info
	      && !STMT_VINFO_RELEVANT_P (stmt_info)
	      && (!STMT_VINFO_LIVE_P (stmt_info)
		  || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !STMT_VINFO_IN_PATTERN_P (stmt_info))
	    continue;

	  vect_cost_for_stmt kind;
	  if (STMT_VINFO_DATA_REF (stmt_info))
	    {
	      if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
		kind = scalar_load;
	      else
		kind = scalar_store;
	    }
	  else
	    kind = scalar_stmt;

	  record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
			    factor, kind, stmt_info, 0, vect_prologue);
	}
    }

  /* Now accumulate cost.  */
  void *target_cost_data = init_cost (loop);
  stmt_info_for_cost *si;
  int j;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
		    j, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
      (void) add_stmt_cost (target_cost_data, si->count,
			    si->kind, stmt_info, si->misalign,
			    vect_body);
    }
  unsigned dummy, body_cost = 0;
  finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
  destroy_cost_data (target_cost_data);
  LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
}
/* Function vect_analyze_loop_form_1.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough
   - the number of iterations can be analyzed, i.e., a countable loop.  The
     niter could be analyzed under some assumptions.  */
static bool
vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
			  tree *assumptions, tree *number_of_iterationsm1,
			  tree *number_of_iterations, gcond **inner_loop_cond)
{
  DUMP_VECT_SCOPE ("vect_analyze_loop_form");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
	 exactly 2 (the header and latch).  Vectorizable inner-most loops
	 look like this:

			(pre-header)
			   |
			  header <--------+
			   | |            |
			   | +--> latch --+
			   |
			(exit-bb)  */

      if (loop->num_nodes != 2)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
	  return false;
	}

      if (empty_block_p (loop->header))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: empty loop.\n");
	  return false;
	}
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
	 contains a single inner loop, and the number of BBs is exactly 5.
	 Vectorizable outer-loops look like this:

			(pre-header)
			   |
			  header <---+
			   |         |
			  inner-loop |
			   |         |
			  tail ------+
			   |
			(exit-bb)

	 The inner-loop has the properties expected of inner-most loops
	 as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple nested loops.\n");
	  return false;
	}

      if (loop->num_nodes != 5)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: control flow in loop.\n");
	  return false;
	}

      entryedge = loop_preheader_edge (innerloop);
      if (entryedge->src != loop->header
	  || !single_exit (innerloop)
	  || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: unsupported outerloop form.\n");
	  return false;
	}

      /* Analyze the inner-loop.  */
      tree inner_niterm1, inner_niter, inner_assumptions;
      if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
				      &inner_assumptions, &inner_niterm1,
				      &inner_niter, NULL)
	  /* Don't support analyzing niter under assumptions for inner
	     loop.  */
	  || !integer_onep (inner_assumptions))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: Bad inner loop.\n");
	  return false;
	}

      if (!expr_invariant_in_loop_p (loop, inner_niter))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: inner-loop count not"
			     " invariant.\n");
	  return false;
	}

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Considering outer-loop vectorization.\n");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
	{
	  if (!single_exit (loop))
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: multiple exits.\n");
	  else if (EDGE_COUNT (loop->header->preds) != 2)
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: too many incoming edges.\n");
	}
      return false;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: latch block not empty.\n");
      return false;
    }

  /* Make sure the exit is not abnormal.  */
  edge e = single_exit (loop);
  if (e->flags & EDGE_ABNORMAL)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: abnormal loop exit edge.\n");
      return false;
    }

  *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
				     number_of_iterationsm1);
  if (!*loop_cond)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: complicated exit condition.\n");
      return false;
    }

  if (integer_zerop (*assumptions)
      || !*number_of_iterations
      || chrec_contains_undetermined (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations cannot be "
			 "computed.\n");
      return false;
    }

  if (integer_zerop (*number_of_iterations))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: number of iterations = 0.\n");
      return false;
    }

  return true;
}
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form.  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
{
  tree assumptions, number_of_iterations, number_of_iterationsm1;
  gcond *loop_cond, *inner_loop_cond = NULL;

  if (! vect_analyze_loop_form_1 (loop, &loop_cond,
				  &assumptions, &number_of_iterationsm1,
				  &number_of_iterations, &inner_loop_cond))
    return NULL;

  loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
  if (!integer_onep (assumptions))
    {
      /* We consider to vectorize this loop by versioning it under
	 some assumptions.  In order to do this, we need to clear
	 existing information computed by scev and niter analyzer.  */
      free_numbers_of_iterations_estimates (loop);
      /* Also set flag for this loop so that following scev and niter
	 analysis are done under the assumptions.  */
      loop_constraint_set (loop, LOOP_C_FINITE);
      /* Also record the assumptions for versioning.  */
      LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Symbolic number of iterations is ");
	  dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
	  dump_printf (MSG_NOTE, "\n");
	}
    }

  stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
  STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
  if (inner_loop_cond)
    {
      stmt_vec_info inner_loop_cond_info
	= loop_vinfo->lookup_stmt (inner_loop_cond);
      STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
    }

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Scan the loop stmts and, depending on whether there are any (non-)SLP
   statements, update the vectorization factor.  */

static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  poly_uint64 vectorization_factor;
  int i;

  DUMP_VECT_SCOPE ("vect_update_vf_for_slp");

  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (known_ne (vectorization_factor, 0U));

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say, that we
     perform pure SLP on loop - cross iteration parallelism is not
     exploited.  */
  bool only_slp_in_loop = true;
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	      && STMT_VINFO_RELATED_STMT (stmt_info))
	    stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
	  if ((STMT_VINFO_RELEVANT_P (stmt_info)
	       || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
	      && !PURE_SLP_STMT (stmt_info))
	    /* STMT needs both SLP and loop-based vectorization.  */
	    only_slp_in_loop = false;
	}
    }

  if (only_slp_in_loop)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "Loop contains only SLP stmts\n");
      vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
    }
  else
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "Loop contains SLP and non-SLP stmts\n");
      /* Both the vectorization factor and unroll factor have the form
	 current_vector_size * X for some rational X, so they must have
	 a common multiple.  */
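      /* For instance (an illustrative example): a loop vectorization factor
	 of 4 combined with an SLP unrolling factor of 6 yields their least
	 common multiple, 12, which then becomes the new vectorization
	 factor.  */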
      vectorization_factor
	= force_common_multiple (vectorization_factor,
				 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
    }

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "Updating vectorization factor to ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, ".\n");
    }
}
/* Return true if STMT_INFO describes a double reduction phi and if
   the other phi in the reduction is also relevant for vectorization.
   This rejects cases such as:

      outer1:
	x_1 = PHI <x_3(outer2), ...>;
	...

      inner:
	x_2 = ...;
	...

      outer2:
	x_3 = PHI <x_2(inner)>;

   if nothing in x_2 or elsewhere makes x_1 relevant.  */

static bool
vect_active_double_reduction_p (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
    return false;

  gimple *other_phi = STMT_VINFO_REDUC_DEF (stmt_info);
  return STMT_VINFO_RELEVANT_P (vinfo_for_stmt (other_phi));
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  bool ok;

  DUMP_VECT_SCOPE ("vect_analyze_loop_operations");

  stmt_vector_for_cost cost_vec;
  cost_vec.create (2);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  ok = true;

	  stmt_info = loop_vinfo->lookup_stmt (phi);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }
	  if (virtual_operand_p (gimple_phi_result (phi)))
	    continue;

	  /* Inner-loop loop-closed exit phi in outer-loop vectorization
	     (i.e., a phi in the tail of the outer-loop).  */
	  if (! is_loop_header_bb_p (bb))
	    {
	      /* FORNOW: we currently don't support the case that these phis
		 are not used in the outerloop (unless it is double reduction,
		 i.e., this phi is vect_reduction_def), cause this case
		 requires to actually do something here.  */
	      if (STMT_VINFO_LIVE_P (stmt_info)
		  && !vect_active_double_reduction_p (stmt_info))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Unsupported loop-closed phi in "
				     "outer-loop.\n");
		  return false;
		}

	      /* If PHI is used in the outer loop, we check that its operand
		 is defined in the inner loop.  */
	      if (STMT_VINFO_RELEVANT_P (stmt_info))
		{
		  tree phi_op;

		  if (gimple_phi_num_args (phi) != 1)
		    return false;

		  phi_op = PHI_ARG_DEF (phi, 0);
		  stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
		  if (!op_def_info)
		    return false;

		  if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
		      && (STMT_VINFO_RELEVANT (op_def_info)
			  != vect_used_in_outer_by_reduction))
		    return false;
		}

	      continue;
	    }

	  gcc_assert (stmt_info);

	  if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
	       || STMT_VINFO_LIVE_P (stmt_info))
	      && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
	    {
	      /* A scalar-dependence cycle that we don't support.  */
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not vectorized: scalar dependence cycle.\n");
	      return false;
	    }

	  if (STMT_VINFO_RELEVANT_P (stmt_info))
	    {
	      need_to_vectorize = true;
	      if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
		  && ! PURE_SLP_STMT (stmt_info))
		ok = vectorizable_induction (phi, NULL, NULL, NULL, &cost_vec);
	      else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
			|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
		       && ! PURE_SLP_STMT (stmt_info))
		ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL,
					     &cost_vec);
	    }

	  /* SLP PHIs are tested by vect_slp_analyze_node_operations.  */
	  if (ok
	      && STMT_VINFO_LIVE_P (stmt_info)
	      && !PURE_SLP_STMT (stmt_info))
	    ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL,
					      &cost_vec);

	  if (!ok)
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: relevant phi not "
				   "supported: ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
		}
	      return false;
	    }
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!gimple_clobber_p (stmt)
	      && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL,
				     &cost_vec))
	    return false;
	}
    } /* bbs */

  add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
  cost_vec.release ();

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "All the computation can be taken out of the loop.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: redundant loop. no profit to "
			 "vectorize.\n");
      return false;
    }

  return true;
}
/* Analyze the cost of the loop described by LOOP_VINFO.  Decide if it
   is worthwhile to vectorize.  Return 1 if definitely yes, 0 if
   definitely no, or -1 if it's worth retrying.  */

static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);

  /* Only fully-masked loops can have iteration counts less than the
     vectorization factor.  */
  if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      HOST_WIDE_INT max_niter;

      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
	max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
      else
	max_niter = max_stmt_executions_int (loop);

      if (max_niter != -1
	  && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: iteration count smaller than "
			     "vectorization factor.\n");
	  return 0;
	}
    }

  int min_profitable_iters, min_profitable_estimate;
  vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
				      &min_profitable_estimate);

  if (min_profitable_iters < 0)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vector version will never be "
			 "profitable.\n");
      return -1;
    }

  int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
			       * assumed_vf);

  /* Use the cost model only if it is more conservative than the user
     specified threshold.  */
  unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
				    min_profitable_iters);

  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not vectorized: iteration count smaller than user "
			 "specified loop bound parameter or minimum profitable "
			 "iterations (whichever is more conservative).\n");
      return 0;
    }

  HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
  if (estimated_niter == -1)
    estimated_niter = likely_max_stmt_executions_int (loop);
  if (estimated_niter != -1
      && ((unsigned HOST_WIDE_INT) estimated_niter
	  < MAX (th, (unsigned) min_profitable_estimate)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: estimated iteration count too "
			 "small.\n");
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not vectorized: estimated iteration count smaller "
			 "than specified loop bound parameter or minimum "
			 "profitable iterations (whichever is more "
			 "conservative).\n");
      return -1;
    }

  return 1;
}
static bool
vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
			   vec<data_reference_p> *datarefs,
			   unsigned int *n_stmts)
{
  *n_stmts = 0;
  for (unsigned i = 0; i < loop->num_nodes; i++)
    for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
	 !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (is_gimple_debug (stmt))
	  continue;
	++(*n_stmts);
	if (!vect_find_stmt_data_reference (loop, stmt, datarefs))
	  {
	    if (is_gimple_call (stmt) && loop->safelen)
	      {
		tree fndecl = gimple_call_fndecl (stmt), op;
		if (fndecl != NULL_TREE)
		  {
		    cgraph_node *node = cgraph_node::get (fndecl);
		    if (node != NULL && node->simd_clones != NULL)
		      {
			unsigned int j, n = gimple_call_num_args (stmt);
			for (j = 0; j < n; j++)
			  {
			    op = gimple_call_arg (stmt, j);
			    if (DECL_P (op)
				|| (REFERENCE_CLASS_P (op)
				    && get_base_address (op)))
			      break;
			  }
			op = gimple_call_lhs (stmt);
			/* Ignore #pragma omp declare simd functions
			   if they don't have data references in the
			   call stmt itself.  */
			if (j == n
			    && !(op
				 && (DECL_P (op)
				     || (REFERENCE_CLASS_P (op)
					 && get_base_address (op)))))
			  continue;
		      }
		  }
	      }
	    return false;
	  }
	/* If dependence analysis will give up due to the limit on the
	   number of datarefs stop here and fail fatally.  */
	if (datarefs->length ()
	    > (unsigned)PARAM_VALUE (PARAM_LOOP_MAX_DATAREFS_FOR_DATADEPS))
	  return false;
      }
  return true;
}
/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */

static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
{
  bool ok;
  int res;
  unsigned th;
  unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
  poly_uint64 min_vf = 2;

  /* The first group of checks is independent of the vector size.  */
  fatal = true;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  */

  loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Gather the data references and count stmts in the loop.  */
  if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
    {
      if (!vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
				      &LOOP_VINFO_DATAREFS (loop_vinfo),
				      n_stmts))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: loop contains function "
			     "calls or data references that cannot "
			     "be analyzed\n");
	  return false;
	}
      loop_vinfo->shared->save_datarefs ();
    }
  else
    loop_vinfo->shared->check_datarefs ();

  /* Analyze the data references and also adjust the minimal
     vectorization factor according to the loads and stores.  */

  ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data references.\n");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */
  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  vect_fixup_scalar_cycles_with_patterns (loop_vinfo);

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data access.\n");
      return false;
    }

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unexpected pattern.\n");
      return false;
    }

  /* While the rest of the analysis below depends on it in some way.  */
  fatal = false;

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
  if (!ok
      || (max_vf != MAX_VECTORIZATION_FACTOR
	  && maybe_lt (max_vf, min_vf)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data dependence.\n");
      return false;
    }
  LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "can't determine vectorization factor.\n");
      return false;
    }
  if (max_vf != MAX_VECTORIZATION_FACTOR
      && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data dependence.\n");
      return false;
    }

  /* Compute the scalar iteration cost.  */
  vect_compute_single_scalar_iteration_cost (loop_vinfo);

  poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, *n_stmts);
  if (!ok)
    return false;

  /* If there are any SLP instances mark them as pure_slp.  */
  bool slp = vect_make_slp_decision (loop_vinfo);
  if (slp)
    {
      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);

      /* Update the vectorization factor based on the SLP decision.  */
      vect_update_vf_for_slp (loop_vinfo);
    }

  bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);

  /* We don't expect to have to roll back to anything other than an empty
     set of rgroups.  */
  gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());

  /* This is the point where we can re-start analysis with SLP forced off.  */
start_over:

  /* Now the vectorization factor is final.  */
  poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (known_ne (vectorization_factor, 0U));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vectorization_factor = ");
      dump_dec (MSG_NOTE, vectorization_factor);
      dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
		   LOOP_VINFO_INT_NITERS (loop_vinfo));
    }

  HOST_WIDE_INT max_niter
    = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad data alignment.\n");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    return false;

  /* Do not invoke vect_enhance_data_refs_alignment for epilogue
     vectorization.  */
  if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    {
      /* This pass will decide on using loop versioning and/or loop peeling in
	 order to enhance the alignment of data references in the loop.  */
      ok = vect_enhance_data_refs_alignment (loop_vinfo);
      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "bad data alignment.\n");
	  return false;
	}
    }

  if (slp)
    {
      /* Analyze operations in the SLP instances.  Note this may
	 remove unsupported SLP instances which makes the above
	 SLP kind detection invalid.  */
      unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
      vect_slp_analyze_operations (loop_vinfo);
      if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
	goto again;
    }

  /* Scan all the remaining operations in the loop that are not subject
     to SLP and make sure they are vectorizable.  */
  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bad operation or unsupported loop bound.\n");
      return false;
    }

  /* Decide whether to use a fully-masked loop for this vectorization
     factor.  */
  LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
    = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
       && vect_verify_full_masking (loop_vinfo));
  if (dump_enabled_p ())
    {
      if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
	dump_printf_loc (MSG_NOTE, vect_location,
			 "using a fully-masked loop.\n");
      else
	dump_printf_loc (MSG_NOTE, vect_location,
			 "not using a fully-masked loop.\n");
    }

  /* If epilog loop is required because of data accesses with gaps,
     one additional iteration needs to be peeled.  Check if there is
     enough iterations for vectorization.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);

      if (known_lt (wi::to_widest (scalar_niters), vf))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "loop does not have enough iterations to support"
			     " peeling for gaps.\n");
	  return false;
	}
    }

  /* Check the costings of the loop make vectorizing worthwhile.  */
  res = vect_analyze_loop_costing (loop_vinfo);
  if (res < 0)
    goto again;
  if (!res)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Loop costings not worthwhile.\n");
      return false;
    }

  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);

  unsigned HOST_WIDE_INT const_vf;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    /* The main loop handles all iterations.  */
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	   && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
    {
      if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
		       - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
		       LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
	LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
	   || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
	   || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
		< (unsigned) exact_log2 (const_vf))
	       /* In case of versioning, check if the maximum number of
		  iterations is greater than th.  If they are identical,
		  the epilogue is unnecessary.  */
	       && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
		   || ((unsigned HOST_WIDE_INT) max_niter
		       > (th / const_vf) * const_vf))))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;

  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
	  || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
					   single_exit (LOOP_VINFO_LOOP
							(loop_vinfo))))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: can't create required "
2136 /* During peeling, we need to check if number of loop iterations is
2137 enough for both peeled prolog loop and vector loop. This check
2138 can be merged along with threshold check of loop versioning, so
2139 increase threshold for this case if necessary. */
2140 if (LOOP_REQUIRES_VERSIONING (loop_vinfo
))
2142 poly_uint64 niters_th
= 0;
2144 if (!vect_use_loop_mask_for_alignment_p (loop_vinfo
))
2146 /* Niters for peeled prolog loop. */
2147 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo
) < 0)
2149 struct data_reference
*dr
= LOOP_VINFO_UNALIGNED_DR (loop_vinfo
);
2151 = STMT_VINFO_VECTYPE (vinfo_for_stmt (vect_dr_stmt (dr
)));
2152 niters_th
+= TYPE_VECTOR_SUBPARTS (vectype
) - 1;
2155 niters_th
+= LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo
);
2158 /* Niters for at least one iteration of vectorized loop. */
2159 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
))
2160 niters_th
+= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
2161 /* One additional iteration because of peeling for gap. */
2162 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
))
2164 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo
) = niters_th
;
2167 gcc_assert (known_eq (vectorization_factor
,
2168 LOOP_VINFO_VECT_FACTOR (loop_vinfo
)));
2170 /* Ok to vectorize! */
2174 /* Try again with SLP forced off but if we didn't do any SLP there is
2175 no point in re-trying. */
2179 /* If there are reduction chains re-trying will fail anyway. */
2180 if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo
).is_empty ())
2183 /* Likewise if the grouped loads or stores in the SLP cannot be handled
2184 via interleaving or lane instructions. */
2185 slp_instance instance
;
2188 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo
), i
, instance
)
2190 stmt_vec_info vinfo
;
2191 vinfo
= vinfo_for_stmt
2192 (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance
))[0]);
2193 if (! STMT_VINFO_GROUPED_ACCESS (vinfo
))
2195 vinfo
= vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo
));
2196 unsigned int size
= DR_GROUP_SIZE (vinfo
);
2197 tree vectype
= STMT_VINFO_VECTYPE (vinfo
);
2198 if (! vect_store_lanes_supported (vectype
, size
, false)
2199 && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype
), 1U)
2200 && ! vect_grouped_store_supported (vectype
, size
))
2202 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance
), j
, node
)
2204 vinfo
= vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node
)[0]);
2205 vinfo
= vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo
));
2206 bool single_element_p
= !DR_GROUP_NEXT_ELEMENT (vinfo
);
2207 size
= DR_GROUP_SIZE (vinfo
);
2208 vectype
= STMT_VINFO_VECTYPE (vinfo
);
2209 if (! vect_load_lanes_supported (vectype
, size
, false)
2210 && ! vect_grouped_load_supported (vectype
, single_element_p
,
2216 if (dump_enabled_p ())
2217 dump_printf_loc (MSG_NOTE
, vect_location
,
2218 "re-trying with SLP disabled\n");
2220 /* Roll back state appropriately. No SLP this time. */
2222 /* Restore vectorization factor as it were without SLP. */
2223 LOOP_VINFO_VECT_FACTOR (loop_vinfo
) = saved_vectorization_factor
;
2224 /* Free the SLP instances. */
2225 FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo
), j
, instance
)
2226 vect_free_slp_instance (instance
, false);
2227 LOOP_VINFO_SLP_INSTANCES (loop_vinfo
).release ();
2228 /* Reset SLP type to loop_vect on all stmts. */
2229 for (i
= 0; i
< LOOP_VINFO_LOOP (loop_vinfo
)->num_nodes
; ++i
)
2231 basic_block bb
= LOOP_VINFO_BBS (loop_vinfo
)[i
];
2232 for (gimple_stmt_iterator si
= gsi_start_phis (bb
);
2233 !gsi_end_p (si
); gsi_next (&si
))
2235 stmt_vec_info stmt_info
= loop_vinfo
->lookup_stmt (gsi_stmt (si
));
2236 STMT_SLP_TYPE (stmt_info
) = loop_vect
;
2238 for (gimple_stmt_iterator si
= gsi_start_bb (bb
);
2239 !gsi_end_p (si
); gsi_next (&si
))
2241 stmt_vec_info stmt_info
= loop_vinfo
->lookup_stmt (gsi_stmt (si
));
2242 STMT_SLP_TYPE (stmt_info
) = loop_vect
;
2243 if (STMT_VINFO_IN_PATTERN_P (stmt_info
))
2245 gimple
*pattern_def_seq
= STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
);
2246 stmt_info
= STMT_VINFO_RELATED_STMT (stmt_info
);
2247 STMT_SLP_TYPE (stmt_info
) = loop_vect
;
2248 for (gimple_stmt_iterator pi
= gsi_start (pattern_def_seq
);
2249 !gsi_end_p (pi
); gsi_next (&pi
))
2250 STMT_SLP_TYPE (loop_vinfo
->lookup_stmt (gsi_stmt (pi
)))
2255 /* Free optimized alias test DDRS. */
2256 LOOP_VINFO_LOWER_BOUNDS (loop_vinfo
).truncate (0);
2257 LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo
).release ();
2258 LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo
).release ();
2259 /* Reset target cost data. */
2260 destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
));
2261 LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
)
2262 = init_cost (LOOP_VINFO_LOOP (loop_vinfo
));
2263 /* Reset accumulated rgroup information. */
2264 release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo
));
2265 /* Reset assorted flags. */
2266 LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo
) = false;
2267 LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
) = false;
2268 LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo
) = 0;
2269 LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo
) = 0;
2270 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
) = saved_can_fully_mask_p
;
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL epilogue must
   be vectorized.  */

loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
		   vec_info_shared *shared)
{
  loop_vec_info loop_vinfo;
  auto_vector_sizes vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
  unsigned int next_size = 0;

  DUMP_VECT_SCOPE ("analyze_loop_nest");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop already vectorized.\n");
      return NULL;
    }

  if (!find_loop_nest (loop, &shared->loop_nest))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: loop nest containing two "
			 "or more consecutive inner loops cannot be "
			 "vectorized\n");
      return NULL;
    }

  unsigned n_stmts = 0;
  poly_uint64 autodetected_vector_size = 0;
  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop, shared);
      if (!loop_vinfo)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "bad loop form.\n");
	  return NULL;
	}

      bool fatal = false;

      if (orig_loop_vinfo)
	LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;

      if (vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts))
	{
	  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

	  return loop_vinfo;
	}

      delete loop_vinfo;

      if (next_size == 0)
	autodetected_vector_size = current_vector_size;

      if (next_size < vector_sizes.length ()
	  && known_eq (vector_sizes[next_size], autodetected_vector_size))
	next_size += 1;

      if (fatal
	  || next_size == vector_sizes.length ()
	  || known_eq (current_vector_size, 0U))
	return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = vector_sizes[next_size++];
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "***** Re-trying analysis with "
			   "vector size ");
	  dump_dec (MSG_NOTE, current_vector_size);
	  dump_printf (MSG_NOTE, "\n");
	}
    }
}
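
/* Illustrative note (not part of the original source; the concrete sizes
   are assumptions for the example only): on a target whose
   autovectorize_vector_sizes hook reports candidate sizes {32, 16, 8}
   bytes, the loop above first analyzes with the autodetected size and, on
   a non-fatal failure, re-tries with each remaining candidate in turn,
   skipping the candidate equal to the autodetected size so the same
   analysis is not repeated.  */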
/* Return true if there is an in-order reduction function for CODE, storing
   it in *REDUC_FN if so.  */

static bool
fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
{
  switch (code)
    {
    case PLUS_EXPR:
      *reduc_fn = IFN_FOLD_LEFT_PLUS;
      return true;

    default:
      return false;
    }
}

/* Function reduction_fn_for_scalar_code

   Input:
   CODE - tree_code of a reduction operations.

   Output:
   REDUC_FN - the corresponding internal function to be used to reduce the
      vector of partial results into a single scalar result, or IFN_LAST
      if the operation is a supported reduction operation, but does not have
      such an internal function.

   Return FALSE if CODE currently cannot be vectorized as reduction.  */

static bool
reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_fn = IFN_REDUC_MAX;
      return true;

    case MIN_EXPR:
      *reduc_fn = IFN_REDUC_MIN;
      return true;

    case PLUS_EXPR:
      *reduc_fn = IFN_REDUC_PLUS;
      return true;

    case BIT_AND_EXPR:
      *reduc_fn = IFN_REDUC_AND;
      return true;

    case BIT_IOR_EXPR:
      *reduc_fn = IFN_REDUC_IOR;
      return true;

    case BIT_XOR_EXPR:
      *reduc_fn = IFN_REDUC_XOR;
      return true;

    case MULT_EXPR:
    case MINUS_EXPR:
      *reduc_fn = IFN_LAST;
      return true;

    default:
      return false;
    }
}
/* If there is a neutral value X such that SLP reduction NODE would not
   be affected by the introduction of additional X elements, return that X,
   otherwise return null.  CODE is the code of the reduction.  REDUC_CHAIN
   is true if the SLP statements perform a single reduction, false if each
   statement performs an independent reduction.  */

static tree
neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
			      bool reduc_chain)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
  tree scalar_type = TREE_TYPE (vector_type);
  struct loop *loop = gimple_bb (stmt)->loop_father;

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      return build_zero_cst (scalar_type);

    case MULT_EXPR:
      return build_one_cst (scalar_type);

    case BIT_AND_EXPR:
      return build_all_ones_cst (scalar_type);

    case MAX_EXPR:
    case MIN_EXPR:
      /* For MIN/MAX the initial values are neutral.  A reduction chain
	 has only a single initial value, so that value is neutral for
	 all statements.  */
      if (reduc_chain)
	return PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
      return NULL_TREE;

    default:
      return NULL_TREE;
    }
}
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */

static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}

/* DEF_STMT_INFO occurs in a loop that contains a potential reduction
   operation.  Return true if the results of DEF_STMT_INFO are something
   that can be accumulated by such a reduction.  */

static bool
vect_valid_reduction_input_p (stmt_vec_info def_stmt_info)
{
  return (is_gimple_assign (def_stmt_info->stmt)
	  || is_gimple_call (def_stmt_info->stmt)
	  || STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_induction_def
	  || (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI
	      && STMT_VINFO_DEF_TYPE (def_stmt_info) == vect_internal_def
	      && !is_loop_header_bb_p (gimple_bb (def_stmt_info->stmt))));
}
/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */

static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
		       gimple *first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  /* Check if we got back to the reduction phi.  */
	  if (use_stmt == phi)
	    {
	      loop_use_stmt = use_stmt;
	      found = true;
	      break;
	    }

	  if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	    {
	      loop_use_stmt = use_stmt;
	      nloop_uses++;
	    }
	  else
	    n_out_of_loop_uses++;

	  /* There can be either a single use in the loop or two uses in
	     phi nodes.  */
	  if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
	    return false;
	}

      if (found)
	break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
	return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
	return false;

      if (!is_gimple_assign (loop_use_stmt)
	  || code != gimple_assign_rhs_code (loop_use_stmt)
	  || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
	return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = loop_info->lookup_stmt (loop_use_stmt);
      if (current_stmt)
	{
	  current_stmt_info = vinfo_for_stmt (current_stmt);
	  REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
	  REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
	    = REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
	}
      else
	REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
	{
	  tree op = gimple_assign_rhs1 (next_stmt);
	  stmt_vec_info def_stmt_info = loop_info->lookup_def (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
	  if (def_stmt_info
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
	      && vect_valid_reduction_input_p (def_stmt_info))
	    {
	      lhs = gimple_assign_lhs (next_stmt);
	      next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      continue;
	    }

	  return false;
	}
      else
	{
	  tree op = gimple_assign_rhs2 (next_stmt);
	  stmt_vec_info def_stmt_info = loop_info->lookup_def (op);

	  /* Check that the other def is either defined in the loop
	     ("vect_internal_def"), or it's an induction (defined by a
	     loop-header phi-node).  */
	  if (def_stmt_info
	      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt))
	      && vect_valid_reduction_input_p (def_stmt_info))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "swapping oprnds: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
		}

	      swap_ssa_operands (next_stmt,
				 gimple_assign_rhs1_ptr (next_stmt),
				 gimple_assign_rhs2_ptr (next_stmt));
	      update_stmt (next_stmt);

	      if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
		LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
	    }
	  else
	    return false;
	}

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
  REDUC_GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}
/* Return true if we need an in-order reduction for operation CODE
   on type TYPE.  NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
   overflow must wrap.  */

static bool
needs_fold_left_reduction_p (tree type, tree_code code,
			     bool need_wrapping_integral_overflow)
{
  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type))
    switch (code)
      {
      case MIN_EXPR:
      case MAX_EXPR:
	return false;

      default:
	return !flag_associative_math;
      }

  if (INTEGRAL_TYPE_P (type))
    {
      if (!operation_no_trapping_overflow (type, code))
	return true;
      if (need_wrapping_integral_overflow
	  && !TYPE_OVERFLOW_WRAPS (type)
	  && operation_can_overflow (code))
	return true;
      return false;
    }

  if (SAT_FIXED_POINT_TYPE_P (type))
    return true;

  return false;
}
/* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
   reduction operation CODE has a handled computation expression.  */

bool
check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
		      tree loop_arg, enum tree_code code)
{
  auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
  auto_bitmap visited;
  tree lookfor = PHI_RESULT (phi);
  ssa_op_iter curri;
  use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
  while (USE_FROM_PTR (curr) != loop_arg)
    curr = op_iter_next_use (&curri);
  curri.i = curri.numops;
  do
    {
      path.safe_push (std::make_pair (curri, curr));
      tree use = USE_FROM_PTR (curr);
      if (use == lookfor)
	break;
      gimple *def = SSA_NAME_DEF_STMT (use);
      if (gimple_nop_p (def)
	  || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
	{
pop:
	  do
	    {
	      std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
	      curri = x.first;
	      curr = x.second;
	      do
		curr = op_iter_next_use (&curri);
	      /* Skip already visited or non-SSA operands (from iterating
		 over PHI args).  */
	      while (curr != NULL_USE_OPERAND_P
		     && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
			 || ! bitmap_set_bit (visited,
					      SSA_NAME_VERSION
						(USE_FROM_PTR (curr)))));
	    }
	  while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
	  if (curr == NULL_USE_OPERAND_P)
	    break;
	}
      else
	{
	  if (gimple_code (def) == GIMPLE_PHI)
	    curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
	  else
	    curr = op_iter_init_use (&curri, def, SSA_OP_USE);
	  while (curr != NULL_USE_OPERAND_P
		 && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
		     || ! bitmap_set_bit (visited,
					  SSA_NAME_VERSION
					    (USE_FROM_PTR (curr)))))
	    curr = op_iter_next_use (&curri);
	  if (curr == NULL_USE_OPERAND_P)
	    goto pop;
	}
    }
  while (1);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
      unsigned i;
      std::pair<ssa_op_iter, use_operand_p> *x;
      FOR_EACH_VEC_ELT (path, i, x)
	{
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
	  dump_printf (MSG_NOTE, " ");
	}
      dump_printf (MSG_NOTE, "\n");
    }

  /* Check whether the reduction path detected is valid.  */
  bool fail = path.length () == 0;
  bool neg = false;
  for (unsigned i = 1; i < path.length (); ++i)
    {
      gimple *use_stmt = USE_STMT (path[i].second);
      tree op = USE_FROM_PTR (path[i].second);
      if (! has_single_use (op)
	  || ! is_gimple_assign (use_stmt))
	{
	  fail = true;
	  break;
	}
      if (gimple_assign_rhs_code (use_stmt) != code)
	{
	  if (code == PLUS_EXPR
	      && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
	    {
	      /* Track whether we negate the reduction value each iteration.  */
	      if (gimple_assign_rhs2 (use_stmt) == op)
		neg = ! neg;
	    }
	  else
	    {
	      fail = true;
	      break;
	    }
	}
    }
  return ! fail && ! neg;
}
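
/* Illustrative example (the SSA names are invented for this sketch):
   for a PHI

     x_1 = PHI <x_4(latch), x_0(preheader)>

   whose loop body contains

     x_3 = x_1 + a_2;
     x_4 = x_3 + b_5;

   with CODE == PLUS_EXPR and LOOP_ARG == x_4, the walk above records the
   path x_4 -> x_3 -> x_1 and accepts it, because every intermediate value
   has a single use and every statement uses PLUS_EXPR.  A MINUS_EXPR is
   tolerated for PLUS_EXPR reductions as long as the reduction value is not
   negated overall.  */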
/* Function vect_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   or

   a3 = ...
   loop_header:
     a1 = phi < a0, a2 >
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   (4) Detect condition expressions, ie:
     for (int i = 0; i < N; i++)
       if (a[i] < val)
	 ret_val = a[i];  */

static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
			  bool *double_reduc,
			  bool need_wrapping_integral_overflow,
			  enum vect_reduction_type *v_reduc_type)
{
2877 struct loop
*loop
= (gimple_bb (phi
))->loop_father
;
2878 struct loop
*vect_loop
= LOOP_VINFO_LOOP (loop_info
);
2879 gimple
*def_stmt
, *phi_use_stmt
= NULL
;
2880 enum tree_code orig_code
, code
;
2881 tree op1
, op2
, op3
= NULL_TREE
, op4
= NULL_TREE
;
2885 imm_use_iterator imm_iter
;
2886 use_operand_p use_p
;
2889 *double_reduc
= false;
2890 *v_reduc_type
= TREE_CODE_REDUCTION
;
2892 tree phi_name
= PHI_RESULT (phi
);
2893 /* ??? If there are no uses of the PHI result the inner loop reduction
2894 won't be detected as possibly double-reduction by vectorizable_reduction
2895 because that tries to walk the PHI arg from the preheader edge which
2896 can be constant. See PR60382. */
2897 if (has_zero_uses (phi_name
))
2900 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, phi_name
)
2902 gimple
*use_stmt
= USE_STMT (use_p
);
2903 if (is_gimple_debug (use_stmt
))
2906 if (!flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
)))
2908 if (dump_enabled_p ())
2909 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2910 "intermediate value used outside loop.\n");
2918 if (dump_enabled_p ())
2919 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2920 "reduction value used in loop.\n");
2924 phi_use_stmt
= use_stmt
;
2927 edge latch_e
= loop_latch_edge (loop
);
2928 tree loop_arg
= PHI_ARG_DEF_FROM_EDGE (phi
, latch_e
);
2929 if (TREE_CODE (loop_arg
) != SSA_NAME
)
2931 if (dump_enabled_p ())
2933 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2934 "reduction: not ssa_name: ");
2935 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, loop_arg
);
2936 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
2941 def_stmt
= SSA_NAME_DEF_STMT (loop_arg
);
2942 if (is_gimple_assign (def_stmt
))
2944 name
= gimple_assign_lhs (def_stmt
);
2947 else if (gimple_code (def_stmt
) == GIMPLE_PHI
)
2949 name
= PHI_RESULT (def_stmt
);
2954 if (dump_enabled_p ())
2956 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2957 "reduction: unhandled reduction operation: ");
2958 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, def_stmt
, 0);
2963 if (! flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
2967 auto_vec
<gphi
*, 3> lcphis
;
2968 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, name
)
2970 gimple
*use_stmt
= USE_STMT (use_p
);
2971 if (is_gimple_debug (use_stmt
))
2973 if (flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
)))
2976 /* We can have more than one loop-closed PHI. */
2977 lcphis
.safe_push (as_a
<gphi
*> (use_stmt
));
2980 if (dump_enabled_p ())
2981 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2982 "reduction used in loop.\n");
2987 /* If DEF_STMT is a phi node itself, we expect it to have a single argument
2988 defined in the inner loop. */
2991 op1
= PHI_ARG_DEF (def_stmt
, 0);
2993 if (gimple_phi_num_args (def_stmt
) != 1
2994 || TREE_CODE (op1
) != SSA_NAME
)
2996 if (dump_enabled_p ())
2997 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2998 "unsupported phi node definition.\n");
3003 gimple
*def1
= SSA_NAME_DEF_STMT (op1
);
3004 if (gimple_bb (def1
)
3005 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
))
3007 && flow_bb_inside_loop_p (loop
->inner
, gimple_bb (def1
))
3008 && is_gimple_assign (def1
)
3009 && flow_bb_inside_loop_p (loop
->inner
, gimple_bb (phi_use_stmt
)))
3011 if (dump_enabled_p ())
3012 report_vect_op (MSG_NOTE
, def_stmt
,
3013 "detected double reduction: ");
3015 *double_reduc
= true;
3022 /* If we are vectorizing an inner reduction we are executing that
3023 in the original order only in case we are not dealing with a
3024 double reduction. */
3025 bool check_reduction
= true;
3026 if (flow_loop_nested_p (vect_loop
, loop
))
3030 check_reduction
= false;
3031 FOR_EACH_VEC_ELT (lcphis
, i
, lcphi
)
3032 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, gimple_phi_result (lcphi
))
3034 gimple
*use_stmt
= USE_STMT (use_p
);
3035 if (is_gimple_debug (use_stmt
))
3037 if (! flow_bb_inside_loop_p (vect_loop
, gimple_bb (use_stmt
)))
3038 check_reduction
= true;
3042 bool nested_in_vect_loop
= flow_loop_nested_p (vect_loop
, loop
);
3043 code
= orig_code
= gimple_assign_rhs_code (def_stmt
);
3045 /* We can handle "res -= x[i]", which is non-associative by
3046 simply rewriting this into "res += -x[i]". Avoid changing
3047 gimple instruction for the first simple tests and only do this
3048 if we're allowed to change code at all. */
3049 if (code
== MINUS_EXPR
&& gimple_assign_rhs2 (def_stmt
) != phi_name
)
3052 if (code
== COND_EXPR
)
3054 if (! nested_in_vect_loop
)
3055 *v_reduc_type
= COND_REDUCTION
;
3057 op3
= gimple_assign_rhs1 (def_stmt
);
3058 if (COMPARISON_CLASS_P (op3
))
3060 op4
= TREE_OPERAND (op3
, 1);
3061 op3
= TREE_OPERAND (op3
, 0);
3063 if (op3
== phi_name
|| op4
== phi_name
)
3065 if (dump_enabled_p ())
3066 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3067 "reduction: condition depends on previous"
3072 op1
= gimple_assign_rhs2 (def_stmt
);
3073 op2
= gimple_assign_rhs3 (def_stmt
);
3075 else if (!commutative_tree_code (code
) || !associative_tree_code (code
))
3077 if (dump_enabled_p ())
3078 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3079 "reduction: not commutative/associative: ");
3082 else if (get_gimple_rhs_class (code
) == GIMPLE_BINARY_RHS
)
3084 op1
= gimple_assign_rhs1 (def_stmt
);
3085 op2
= gimple_assign_rhs2 (def_stmt
);
3089 if (dump_enabled_p ())
3090 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3091 "reduction: not handled operation: ");
3095 if (TREE_CODE (op1
) != SSA_NAME
&& TREE_CODE (op2
) != SSA_NAME
)
3097 if (dump_enabled_p ())
3098 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3099 "reduction: both uses not ssa_names: ");
3104 type
= TREE_TYPE (gimple_assign_lhs (def_stmt
));
3105 if ((TREE_CODE (op1
) == SSA_NAME
3106 && !types_compatible_p (type
,TREE_TYPE (op1
)))
3107 || (TREE_CODE (op2
) == SSA_NAME
3108 && !types_compatible_p (type
, TREE_TYPE (op2
)))
3109 || (op3
&& TREE_CODE (op3
) == SSA_NAME
3110 && !types_compatible_p (type
, TREE_TYPE (op3
)))
3111 || (op4
&& TREE_CODE (op4
) == SSA_NAME
3112 && !types_compatible_p (type
, TREE_TYPE (op4
))))
3114 if (dump_enabled_p ())
3116 dump_printf_loc (MSG_NOTE
, vect_location
,
3117 "reduction: multiple types: operation type: ");
3118 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, type
);
3119 dump_printf (MSG_NOTE
, ", operands types: ");
3120 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3122 dump_printf (MSG_NOTE
, ",");
3123 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3127 dump_printf (MSG_NOTE
, ",");
3128 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3134 dump_printf (MSG_NOTE
, ",");
3135 dump_generic_expr (MSG_NOTE
, TDF_SLIM
,
3138 dump_printf (MSG_NOTE
, "\n");
3144 /* Check whether it's ok to change the order of the computation.
3145 Generally, when vectorizing a reduction we change the order of the
3146 computation. This may change the behavior of the program in some
3147 cases, so we need to check that this is ok. One exception is when
3148 vectorizing an outer-loop: the inner-loop is executed sequentially,
3149 and therefore vectorizing reductions in the inner-loop during
3150 outer-loop vectorization is safe. */
3152 && *v_reduc_type
== TREE_CODE_REDUCTION
3153 && needs_fold_left_reduction_p (type
, code
,
3154 need_wrapping_integral_overflow
))
3155 *v_reduc_type
= FOLD_LEFT_REDUCTION
;
3157 /* Reduction is safe. We're dealing with one of the following:
3158 1) integer arithmetic and no trapv
3159 2) floating point arithmetic, and special flags permit this optimization
3160 3) nested cycle (i.e., outer loop vectorization). */
3161 stmt_vec_info def1_info
= loop_info
->lookup_def (op1
);
3162 stmt_vec_info def2_info
= loop_info
->lookup_def (op2
);
3163 if (code
!= COND_EXPR
&& !def1_info
&& !def2_info
)
3165 if (dump_enabled_p ())
3166 report_vect_op (MSG_NOTE
, def_stmt
, "reduction: no defs for operands: ");
3170 /* Check that one def is the reduction def, defined by PHI,
3171 the other def is either defined in the loop ("vect_internal_def"),
3172 or it's an induction (defined by a loop-header phi-node). */
3175 && def2_info
->stmt
== phi
3176 && (code
== COND_EXPR
3178 || vect_valid_reduction_input_p (def1_info
)))
3180 if (dump_enabled_p ())
3181 report_vect_op (MSG_NOTE
, def_stmt
, "detected reduction: ");
3186 && def1_info
->stmt
== phi
3187 && (code
== COND_EXPR
3189 || vect_valid_reduction_input_p (def2_info
)))
3191 if (! nested_in_vect_loop
&& orig_code
!= MINUS_EXPR
)
3193 /* Check if we can swap operands (just for simplicity - so that
3194 the rest of the code can assume that the reduction variable
3195 is always the last (second) argument). */
3196 if (code
== COND_EXPR
)
3198 /* Swap cond_expr by inverting the condition. */
3199 tree cond_expr
= gimple_assign_rhs1 (def_stmt
);
3200 enum tree_code invert_code
= ERROR_MARK
;
3201 enum tree_code cond_code
= TREE_CODE (cond_expr
);
3203 if (TREE_CODE_CLASS (cond_code
) == tcc_comparison
)
3205 bool honor_nans
= HONOR_NANS (TREE_OPERAND (cond_expr
, 0));
3206 invert_code
= invert_tree_comparison (cond_code
, honor_nans
);
3208 if (invert_code
!= ERROR_MARK
)
3210 TREE_SET_CODE (cond_expr
, invert_code
);
3211 swap_ssa_operands (def_stmt
,
3212 gimple_assign_rhs2_ptr (def_stmt
),
3213 gimple_assign_rhs3_ptr (def_stmt
));
3217 if (dump_enabled_p ())
3218 report_vect_op (MSG_NOTE
, def_stmt
,
3219 "detected reduction: cannot swap operands "
3225 swap_ssa_operands (def_stmt
, gimple_assign_rhs1_ptr (def_stmt
),
3226 gimple_assign_rhs2_ptr (def_stmt
));
3228 if (dump_enabled_p ())
3229 report_vect_op (MSG_NOTE
, def_stmt
,
3230 "detected reduction: need to swap operands: ");
3232 if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt
)))
3233 LOOP_VINFO_OPERANDS_SWAPPED (loop_info
) = true;
3237 if (dump_enabled_p ())
3238 report_vect_op (MSG_NOTE
, def_stmt
, "detected reduction: ");
3244 /* Try to find SLP reduction chain. */
3245 if (! nested_in_vect_loop
3246 && code
!= COND_EXPR
3247 && orig_code
!= MINUS_EXPR
3248 && vect_is_slp_reduction (loop_info
, phi
, def_stmt
))
3250 if (dump_enabled_p ())
3251 report_vect_op (MSG_NOTE
, def_stmt
,
3252 "reduction: detected reduction chain: ");
3257 /* Dissolve group eventually half-built by vect_is_slp_reduction. */
3258 gimple
*first
= REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt
));
3261 gimple
*next
= REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first
));
3262 REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first
)) = NULL
;
3263 REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first
)) = NULL
;
3267 /* Look for the expression computing loop_arg from loop PHI result. */
3268 if (check_reduction_path (vect_location
, loop
, as_a
<gphi
*> (phi
), loop_arg
,
3272 if (dump_enabled_p ())
3274 report_vect_op (MSG_MISSED_OPTIMIZATION
, def_stmt
,
3275 "reduction: unknown pattern: ");
/* Wrapper around vect_is_simple_reduction, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

gimple *
vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
			     bool *double_reduc,
			     bool need_wrapping_integral_overflow)
{
  enum vect_reduction_type v_reduc_type;
  gimple *def = vect_is_simple_reduction (loop_info, phi, double_reduc,
					  need_wrapping_integral_overflow,
					  &v_reduc_type);
  if (def)
    {
      stmt_vec_info reduc_def_info = vinfo_for_stmt (phi);
      STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (reduc_def_info) = def;
      reduc_def_info = vinfo_for_stmt (def);
      STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (reduc_def_info) = phi;
    }
  return def;
}
/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
			     int *peel_iters_epilogue,
			     stmt_vector_for_cost *scalar_cost_vec,
			     stmt_vector_for_cost *prologue_cost_vec,
			     stmt_vector_for_cost *epilogue_cost_vec)
{
  int retval = 0;
  int assumed_vf = vect_vf_for_cost (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = assumed_vf / 2;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "cost model: epilogue peel iters set to vf/2 "
			 "because loop iterations are unknown.\n");

      /* If peeled iterations are known but the number of scalar loop
	 iterations is unknown, count a taken branch per peeled loop.  */
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
				 NULL, 0, vect_prologue);
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
				 NULL, 0, vect_epilogue);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      peel_iters_prologue = niters < peel_iters_prologue ?
			    niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
	 peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
	*peel_iters_epilogue = assumed_vf;
    }

  stmt_info_for_cost *si;
  int j;
  if (peel_iters_prologue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
	stmt_vec_info stmt_info
	  = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	retval += record_stmt_cost (prologue_cost_vec,
				    si->count * peel_iters_prologue,
				    si->kind, stmt_info, si->misalign,
				    vect_prologue);
      }
  if (*peel_iters_epilogue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
	stmt_vec_info stmt_info
	  = si->stmt ? vinfo_for_stmt (si->stmt) : NULL_STMT_VEC_INFO;
	retval += record_stmt_cost (epilogue_cost_vec,
				    si->count * *peel_iters_epilogue,
				    si->kind, stmt_info, si->misalign,
				    vect_epilogue);
      }

  return retval;
}
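
/* Worked example (illustrative numbers only): with NITERS == 103 known at
   compile time, PEEL_ITERS_PROLOGUE == 3 and an assumed VF of 4, the code
   above computes *PEEL_ITERS_EPILOGUE = (103 - 3) % 4 = 0; if peeling for
   gaps is required that is then bumped to the full VF of 4.  The scalar
   cost of one iteration is accumulated PEEL_ITERS_PROLOGUE times into the
   prologue cost vector and *PEEL_ITERS_EPILOGUE times into the epilogue
   cost vector.  */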
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
   of iterations for vectorization.  -1 value means loop vectorization
   is not profitable.  This returned value may be used for dynamic
   profitability check.

   *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
   for static check against estimated number of iterations.  */

static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
				    int *ret_min_profitable_niters,
				    int *ret_min_profitable_estimate)
{
3389 int min_profitable_iters
;
3390 int min_profitable_estimate
;
3391 int peel_iters_prologue
;
3392 int peel_iters_epilogue
;
3393 unsigned vec_inside_cost
= 0;
3394 int vec_outside_cost
= 0;
3395 unsigned vec_prologue_cost
= 0;
3396 unsigned vec_epilogue_cost
= 0;
3397 int scalar_single_iter_cost
= 0;
3398 int scalar_outside_cost
= 0;
3399 int assumed_vf
= vect_vf_for_cost (loop_vinfo
);
3400 int npeel
= LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo
);
3401 void *target_cost_data
= LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
);
3403 /* Cost model disabled. */
3404 if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo
)))
3406 dump_printf_loc (MSG_NOTE
, vect_location
, "cost model disabled.\n");
3407 *ret_min_profitable_niters
= 0;
3408 *ret_min_profitable_estimate
= 0;
3412 /* Requires loop versioning tests to handle misalignment. */
3413 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo
))
3415 /* FIXME: Make cost depend on complexity of individual check. */
3416 unsigned len
= LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
).length ();
3417 (void) add_stmt_cost (target_cost_data
, len
, vector_stmt
, NULL
, 0,
3419 dump_printf (MSG_NOTE
,
3420 "cost model: Adding cost of checks for loop "
3421 "versioning to treat misalignment.\n");
3424 /* Requires loop versioning with alias checks. */
3425 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo
))
3427 /* FIXME: Make cost depend on complexity of individual check. */
3428 unsigned len
= LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo
).length ();
3429 (void) add_stmt_cost (target_cost_data
, len
, vector_stmt
, NULL
, 0,
3431 len
= LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo
).length ();
3433 /* Count LEN - 1 ANDs and LEN comparisons. */
3434 (void) add_stmt_cost (target_cost_data
, len
* 2 - 1, scalar_stmt
,
3435 NULL
, 0, vect_prologue
);
3436 len
= LOOP_VINFO_LOWER_BOUNDS (loop_vinfo
).length ();
3439 /* Count LEN - 1 ANDs and LEN comparisons. */
3440 unsigned int nstmts
= len
* 2 - 1;
3441 /* +1 for each bias that needs adding. */
3442 for (unsigned int i
= 0; i
< len
; ++i
)
3443 if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo
)[i
].unsigned_p
)
3445 (void) add_stmt_cost (target_cost_data
, nstmts
, scalar_stmt
,
3446 NULL
, 0, vect_prologue
);
3448 dump_printf (MSG_NOTE
,
3449 "cost model: Adding cost of checks for loop "
3450 "versioning aliasing.\n");
3453 /* Requires loop versioning with niter checks. */
3454 if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo
))
3456 /* FIXME: Make cost depend on complexity of individual check. */
3457 (void) add_stmt_cost (target_cost_data
, 1, vector_stmt
, NULL
, 0,
3459 dump_printf (MSG_NOTE
,
3460 "cost model: Adding cost of checks for loop "
3461 "versioning niters.\n");
3464 if (LOOP_REQUIRES_VERSIONING (loop_vinfo
))
3465 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_taken
, NULL
, 0,
3468 /* Count statements in scalar loop. Using this as scalar cost for a single
3471 TODO: Add outer loop support.
3473 TODO: Consider assigning different costs to different scalar
3476 scalar_single_iter_cost
3477 = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo
);
3479 /* Add additional cost for the peeled instructions in prologue and epilogue
3480 loop. (For fully-masked loops there will be no peeling.)
3482 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
3483 at compile-time - we assume it's vf/2 (the worst would be vf-1).
3485 TODO: Build an expression that represents peel_iters for prologue and
3486 epilogue to be used in a run-time test. */
3488 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
))
3490 peel_iters_prologue
= 0;
3491 peel_iters_epilogue
= 0;
3493 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo
))
3495 /* We need to peel exactly one iteration. */
3496 peel_iters_epilogue
+= 1;
3497 stmt_info_for_cost
*si
;
3499 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo
),
3502 struct _stmt_vec_info
*stmt_info
3503 = si
->stmt
? vinfo_for_stmt (si
->stmt
) : NULL_STMT_VEC_INFO
;
3504 (void) add_stmt_cost (target_cost_data
, si
->count
,
3505 si
->kind
, stmt_info
, si
->misalign
,
3512 peel_iters_prologue
= assumed_vf
/ 2;
3513 dump_printf (MSG_NOTE
, "cost model: "
3514 "prologue peel iters set to vf/2.\n");
3516 /* If peeling for alignment is unknown, loop bound of main loop becomes
3518 peel_iters_epilogue
= assumed_vf
/ 2;
3519 dump_printf (MSG_NOTE
, "cost model: "
3520 "epilogue peel iters set to vf/2 because "
3521 "peeling for alignment is unknown.\n");
3523 /* If peeled iterations are unknown, count a taken branch and a not taken
3524 branch per peeled loop. Even if scalar loop iterations are known,
3525 vector iterations are not known since peeled prologue iterations are
3526 not known. Hence guards remain the same. */
3527 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_taken
,
3528 NULL
, 0, vect_prologue
);
3529 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_not_taken
,
3530 NULL
, 0, vect_prologue
);
3531 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_taken
,
3532 NULL
, 0, vect_epilogue
);
3533 (void) add_stmt_cost (target_cost_data
, 1, cond_branch_not_taken
,
3534 NULL
, 0, vect_epilogue
);
3535 stmt_info_for_cost
*si
;
3537 FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo
), j
, si
)
3539 struct _stmt_vec_info
*stmt_info
3540 = si
->stmt
? vinfo_for_stmt (si
->stmt
) : NULL_STMT_VEC_INFO
;
3541 (void) add_stmt_cost (target_cost_data
,
3542 si
->count
* peel_iters_prologue
,
3543 si
->kind
, stmt_info
, si
->misalign
,
3545 (void) add_stmt_cost (target_cost_data
,
3546 si
->count
* peel_iters_epilogue
,
3547 si
->kind
, stmt_info
, si
->misalign
,
3553 stmt_vector_for_cost prologue_cost_vec
, epilogue_cost_vec
;
3554 stmt_info_for_cost
*si
;
3556 void *data
= LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
);
3558 prologue_cost_vec
.create (2);
3559 epilogue_cost_vec
.create (2);
3560 peel_iters_prologue
= npeel
;
3562 (void) vect_get_known_peeling_cost (loop_vinfo
, peel_iters_prologue
,
3563 &peel_iters_epilogue
,
3564 &LOOP_VINFO_SCALAR_ITERATION_COST
3567 &epilogue_cost_vec
);
3569 FOR_EACH_VEC_ELT (prologue_cost_vec
, j
, si
)
3571 struct _stmt_vec_info
*stmt_info
3572 = si
->stmt
? vinfo_for_stmt (si
->stmt
) : NULL_STMT_VEC_INFO
;
3573 (void) add_stmt_cost (data
, si
->count
, si
->kind
, stmt_info
,
3574 si
->misalign
, vect_prologue
);
3577 FOR_EACH_VEC_ELT (epilogue_cost_vec
, j
, si
)
3579 struct _stmt_vec_info
*stmt_info
3580 = si
->stmt
? vinfo_for_stmt (si
->stmt
) : NULL_STMT_VEC_INFO
;
3581 (void) add_stmt_cost (data
, si
->count
, si
->kind
, stmt_info
,
3582 si
->misalign
, vect_epilogue
);
3585 prologue_cost_vec
.release ();
3586 epilogue_cost_vec
.release ();
  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
	 jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
	 prologue = scalar_iters
       if (prologue == 0)
	 jmp to vector code
       else
	 execute prologue
       if (prologue == num_iters)
	 go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
	 jmp to vector code
       else
	 execute prologue
       if (prologue == num_iters)
	 go to exit
       vector code:
	 if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
	   jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */
3639 /* If the number of iterations is known and we do not do versioning, we can
3640 decide whether to vectorize at compile time. Hence the scalar version
3641 do not carry cost model guard costs. */
3642 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
)
3643 || LOOP_REQUIRES_VERSIONING (loop_vinfo
))
3645 /* Cost model check occurs at versioning. */
3646 if (LOOP_REQUIRES_VERSIONING (loop_vinfo
))
3647 scalar_outside_cost
+= vect_get_stmt_cost (cond_branch_not_taken
);
3650 /* Cost model check occurs at prologue generation. */
3651 if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo
) < 0)
3652 scalar_outside_cost
+= 2 * vect_get_stmt_cost (cond_branch_taken
)
3653 + vect_get_stmt_cost (cond_branch_not_taken
);
3654 /* Cost model check occurs at epilogue generation. */
3656 scalar_outside_cost
+= 2 * vect_get_stmt_cost (cond_branch_taken
);
3660 /* Complete the target-specific cost calculations. */
3661 finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
), &vec_prologue_cost
,
3662 &vec_inside_cost
, &vec_epilogue_cost
);
3664 vec_outside_cost
= (int)(vec_prologue_cost
+ vec_epilogue_cost
);
3666 if (dump_enabled_p ())
3668 dump_printf_loc (MSG_NOTE
, vect_location
, "Cost model analysis: \n");
3669 dump_printf (MSG_NOTE
, " Vector inside of loop cost: %d\n",
3671 dump_printf (MSG_NOTE
, " Vector prologue cost: %d\n",
3673 dump_printf (MSG_NOTE
, " Vector epilogue cost: %d\n",
3675 dump_printf (MSG_NOTE
, " Scalar iteration cost: %d\n",
3676 scalar_single_iter_cost
);
3677 dump_printf (MSG_NOTE
, " Scalar outside cost: %d\n",
3678 scalar_outside_cost
);
3679 dump_printf (MSG_NOTE
, " Vector outside cost: %d\n",
3681 dump_printf (MSG_NOTE
, " prologue iterations: %d\n",
3682 peel_iters_prologue
);
3683 dump_printf (MSG_NOTE
, " epilogue iterations: %d\n",
3684 peel_iters_epilogue
);
  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
       SIC * niters + SOC > VIC * ((niters - PL_ITERS - EP_ITERS) / VF) + VOC
     where
       SIC = scalar iteration cost, VIC = vector iteration cost,
       VOC = vector outside cost, VF = vectorization factor,
       PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
       SOC = scalar outside cost for run time cost model check.  */
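
  /* Worked example (illustrative numbers only, not from the original
     source): with SIC = 4, VIC = 6, VOC = 20, SOC = 0, VF = 4 and one
     prologue plus one epilogue peel iteration, the computation below gives
       min_profitable_iters = ((20 - 0) * 4 - 6 - 6) / (4 * 4 - 6)
			    = 68 / 10 = 6,
     which the subsequent tie-break check bumps to 7, i.e. the vector loop
     only pays off from about seven scalar iterations onwards.  */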
3697 if ((scalar_single_iter_cost
* assumed_vf
) > (int) vec_inside_cost
)
3699 min_profitable_iters
= ((vec_outside_cost
- scalar_outside_cost
)
3701 - vec_inside_cost
* peel_iters_prologue
3702 - vec_inside_cost
* peel_iters_epilogue
);
3703 if (min_profitable_iters
<= 0)
3704 min_profitable_iters
= 0;
3707 min_profitable_iters
/= ((scalar_single_iter_cost
* assumed_vf
)
3710 if ((scalar_single_iter_cost
* assumed_vf
* min_profitable_iters
)
3711 <= (((int) vec_inside_cost
* min_profitable_iters
)
3712 + (((int) vec_outside_cost
- scalar_outside_cost
)
3714 min_profitable_iters
++;
3717 /* vector version will never be profitable. */
3720 if (LOOP_VINFO_LOOP (loop_vinfo
)->force_vectorize
)
3721 warning_at (vect_location
.get_location_t (), OPT_Wopenmp_simd
,
3722 "vectorization did not happen for a simd loop");
3724 if (dump_enabled_p ())
3725 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3726 "cost model: the vector iteration cost = %d "
3727 "divided by the scalar iteration cost = %d "
3728 "is greater or equal to the vectorization factor = %d"
3730 vec_inside_cost
, scalar_single_iter_cost
, assumed_vf
);
3731 *ret_min_profitable_niters
= -1;
3732 *ret_min_profitable_estimate
= -1;
3736 dump_printf (MSG_NOTE
,
3737 " Calculated minimum iters for profitability: %d\n",
3738 min_profitable_iters
);
3740 if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
)
3741 && min_profitable_iters
< (assumed_vf
+ peel_iters_prologue
))
3742 /* We want the vectorized loop to execute at least once. */
3743 min_profitable_iters
= assumed_vf
+ peel_iters_prologue
;
3745 if (dump_enabled_p ())
3746 dump_printf_loc (MSG_NOTE
, vect_location
,
3747 " Runtime profitability threshold = %d\n",
3748 min_profitable_iters
);
3750 *ret_min_profitable_niters
= min_profitable_iters
;
  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.

     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition
     must hold true:
       SIC * niters > VIC * ((niters - PL_ITERS - EP_ITERS) / VF) + VOC + SOC  */
3759 if (vec_outside_cost
<= 0)
3760 min_profitable_estimate
= 0;
3763 min_profitable_estimate
= ((vec_outside_cost
+ scalar_outside_cost
)
3765 - vec_inside_cost
* peel_iters_prologue
3766 - vec_inside_cost
* peel_iters_epilogue
)
3767 / ((scalar_single_iter_cost
* assumed_vf
)
3770 min_profitable_estimate
= MAX (min_profitable_estimate
, min_profitable_iters
);
3771 if (dump_enabled_p ())
3772 dump_printf_loc (MSG_NOTE
, vect_location
,
3773 " Static estimate profitability threshold = %d\n",
3774 min_profitable_estimate
);
3776 *ret_min_profitable_estimate
= min_profitable_estimate
;
/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
   vector elements (not bits) for a vector with NELT elements.  */
static void
calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
			      vec_perm_builder *sel)
{
  /* The encoding is a single stepped pattern.  Any wrap-around is handled
     by vec_perm_indices.  */
  sel->new_vector (nelt, 1, 3);
  for (unsigned int i = 0; i < 3; i++)
    sel->quick_push (i + offset);
}

/* Checks whether the target supports whole-vector shifts for vectors of mode
   MODE.  This is the case if _either_ the platform handles vec_shr_optab, _or_
   it supports vec_perm_const with masks for all necessary shift amounts.  */
static bool
have_whole_vector_shift (machine_mode mode)
{
  if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Variable-length vectors should be handled via the optab.  */
  unsigned int nelt;
  if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    return false;

  vec_perm_builder sel;
  vec_perm_indices indices;
  for (unsigned int i = nelt / 2; i >= 1; i /= 2)
    {
      calc_vec_perm_mask_for_shift (i, nelt, &sel);
      indices.new_vector (sel, 2, nelt);
      if (!can_vec_perm_const_p (mode, indices, false))
	return false;
    }
  return true;
}
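
/* Illustrative example (not part of the original source): for an
   8-element vector the loop above asks whether vec_perm_const can
   implement shifts by 4, 2 and 1 elements, i.e. stepped select patterns
   starting at 4, 2 and 1 ({4,5,6,...}, {2,3,4,...}, {1,2,3,...}, with
   out-of-range indices handled as wrap-around by vec_perm_indices).
   These are exactly the shift amounts a log2-style final reduction
   needs.  */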
/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static void
vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
			   int ncopies, stmt_vector_for_cost *cost_vec)
{
3831 int prologue_cost
= 0, epilogue_cost
= 0, inside_cost
;
3832 enum tree_code code
;
3836 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3837 struct loop
*loop
= NULL
;
3840 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3842 /* Condition reductions generate two reductions in the loop. */
3843 vect_reduction_type reduction_type
3844 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
);
3845 if (reduction_type
== COND_REDUCTION
)
3848 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3849 mode
= TYPE_MODE (vectype
);
3850 stmt_vec_info orig_stmt_info
= STMT_VINFO_RELATED_STMT (stmt_info
);
3852 if (!orig_stmt_info
)
3853 orig_stmt_info
= stmt_info
;
3855 code
= gimple_assign_rhs_code (orig_stmt_info
->stmt
);
3857 if (reduction_type
== EXTRACT_LAST_REDUCTION
3858 || reduction_type
== FOLD_LEFT_REDUCTION
)
3860 /* No extra instructions needed in the prologue. */
3863 if (reduction_type
== EXTRACT_LAST_REDUCTION
|| reduc_fn
!= IFN_LAST
)
3864 /* Count one reduction-like operation per vector. */
3865 inside_cost
= record_stmt_cost (cost_vec
, ncopies
, vec_to_scalar
,
3866 stmt_info
, 0, vect_body
);
3869 /* Use NELEMENTS extracts and NELEMENTS scalar ops. */
3870 unsigned int nelements
= ncopies
* vect_nunits_for_cost (vectype
);
3871 inside_cost
= record_stmt_cost (cost_vec
, nelements
,
3872 vec_to_scalar
, stmt_info
, 0,
3874 inside_cost
+= record_stmt_cost (cost_vec
, nelements
,
3875 scalar_stmt
, stmt_info
, 0,
3881 /* Add in cost for initial definition.
3882 For cond reduction we have four vectors: initial index, step,
3883 initial result of the data reduction, initial value of the index
3885 int prologue_stmts
= reduction_type
== COND_REDUCTION
? 4 : 1;
3886 prologue_cost
+= record_stmt_cost (cost_vec
, prologue_stmts
,
3887 scalar_to_vec
, stmt_info
, 0,
3890 /* Cost of reduction op inside loop. */
3891 inside_cost
= record_stmt_cost (cost_vec
, ncopies
, vector_stmt
,
3892 stmt_info
, 0, vect_body
);
3895 /* Determine cost of epilogue code.
3897 We have a reduction operator that will reduce the vector in one statement.
3898 Also requires scalar extract. */
3900 if (!loop
|| !nested_in_vect_loop_p (loop
, orig_stmt_info
))
3902 if (reduc_fn
!= IFN_LAST
)
3904 if (reduction_type
== COND_REDUCTION
)
3906 /* An EQ stmt and an COND_EXPR stmt. */
3907 epilogue_cost
+= record_stmt_cost (cost_vec
, 2,
3908 vector_stmt
, stmt_info
, 0,
3910 /* Reduction of the max index and a reduction of the found
3912 epilogue_cost
+= record_stmt_cost (cost_vec
, 2,
3913 vec_to_scalar
, stmt_info
, 0,
3915 /* A broadcast of the max value. */
3916 epilogue_cost
+= record_stmt_cost (cost_vec
, 1,
3917 scalar_to_vec
, stmt_info
, 0,
3922 epilogue_cost
+= record_stmt_cost (cost_vec
, 1, vector_stmt
,
3923 stmt_info
, 0, vect_epilogue
);
3924 epilogue_cost
+= record_stmt_cost (cost_vec
, 1,
3925 vec_to_scalar
, stmt_info
, 0,
3929 else if (reduction_type
== COND_REDUCTION
)
3931 unsigned estimated_nunits
= vect_nunits_for_cost (vectype
);
3932 /* Extraction of scalar elements. */
3933 epilogue_cost
+= record_stmt_cost (cost_vec
,
3934 2 * estimated_nunits
,
3935 vec_to_scalar
, stmt_info
, 0,
3937 /* Scalar max reductions via COND_EXPR / MAX_EXPR. */
3938 epilogue_cost
+= record_stmt_cost (cost_vec
,
3939 2 * estimated_nunits
- 3,
3940 scalar_stmt
, stmt_info
, 0,
3943 else if (reduction_type
== EXTRACT_LAST_REDUCTION
3944 || reduction_type
== FOLD_LEFT_REDUCTION
)
3945 /* No extra instructions need in the epilogue. */
3949 int vec_size_in_bits
= tree_to_uhwi (TYPE_SIZE (vectype
));
3951 TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info
->stmt
)));
3952 int element_bitsize
= tree_to_uhwi (bitsize
);
3953 int nelements
= vec_size_in_bits
/ element_bitsize
;
3955 if (code
== COND_EXPR
)
3958 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
3960 /* We have a whole vector shift available. */
3961 if (optab
!= unknown_optab
3962 && VECTOR_MODE_P (mode
)
3963 && optab_handler (optab
, mode
) != CODE_FOR_nothing
3964 && have_whole_vector_shift (mode
))
3966 /* Final reduction via vector shifts and the reduction operator.
3967 Also requires scalar extract. */
3968 epilogue_cost
+= record_stmt_cost (cost_vec
,
3969 exact_log2 (nelements
) * 2,
3970 vector_stmt
, stmt_info
, 0,
3972 epilogue_cost
+= record_stmt_cost (cost_vec
, 1,
3973 vec_to_scalar
, stmt_info
, 0,
3977 /* Use extracts and reduction op for final reduction. For N
3978 elements, we have N extracts and N-1 reduction ops. */
3979 epilogue_cost
+= record_stmt_cost (cost_vec
,
3980 nelements
+ nelements
- 1,
3981 vector_stmt
, stmt_info
, 0,
3986 if (dump_enabled_p ())
3987 dump_printf (MSG_NOTE
,
3988 "vect_model_reduction_cost: inside_cost = %d, "
3989 "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost
,
3990 prologue_cost
, epilogue_cost
);
}


/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
                           stmt_vector_for_cost *cost_vec)
{
  unsigned inside_cost, prologue_cost;

  if (PURE_SLP_STMT (stmt_info))
    return;

  /* loop cost for vec_loop.  */
  inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
                                  stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
                                    stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_induction_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
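
  /* A quick sketch of the arithmetic: with ncopies == 2 this records
     2 vector_stmt copies of the induction update in the loop body and
     2 scalar_to_vec statements (the vector initial value and the vector
     step) in the prologue.  */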
}


/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

     s = init_val;
     for (i = 0; i < n; i++)
       s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */
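
/* An illustrative sketch, assuming init_val = 5 and a 4-element integer
   vector for the sum above: Option1 starts the partial-result vector as
   {0,0,0,0} and reports ADJUSTMENT_DEF = 5 to be added back in the epilog,
   while Option2 starts it as {5,0,0,0} with no adjustment.  For MIN/MAX
   (and COND_EXPR) both options splat the initial value, e.g. {5,5,5,5}.  */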
static tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
                               tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple_seq stmts = NULL;

  gcc_assert (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
              || SCALAR_FLOAT_TYPE_P (scalar_type));

  gcc_assert (nested_in_vect_loop_p (loop, stmt)
              || loop == (gimple_bb (stmt))->loop_father);

  vect_reduction_type reduction_type
    = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
      {
        /* ADJUSTMENT_DEF is NULL when called from
           vect_create_epilog_for_reduction to vectorize double reduction.  */
        if (adjustment_def)
          *adjustment_def = init_val;

        if (code == MULT_EXPR)
          {
            real_init_val = dconst1;
            int_init_val = 1;
          }

        if (code == BIT_AND_EXPR)
          int_init_val = 1;

        if (SCALAR_FLOAT_TYPE_P (scalar_type))
          def_for_init = build_real (scalar_type, real_init_val);
        else
          def_for_init = build_int_cst (scalar_type, int_init_val);

        if (adjustment_def)
          /* Option1: the first element is '0' or '1' as well.  */
          init_def = gimple_build_vector_from_val (&stmts, vectype,
                                                   def_for_init);
        else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
          {
            /* Option2 (variable length): the first element is INIT_VAL.  */
            init_def = gimple_build_vector_from_val (&stmts, vectype,
                                                     def_for_init);
            init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
                                     vectype, init_def, init_val);
          }
        else
          {
            /* Option2: the first element is INIT_VAL.  */
            tree_vector_builder elts (vectype, 1, 2);
            elts.quick_push (init_val);
            elts.quick_push (def_for_init);
            init_def = gimple_build_vector (&stmts, &elts);
          }
      }
      break;

    case MIN_EXPR:
    case MAX_EXPR:
    case COND_EXPR:
      {
        if (adjustment_def)
          {
            *adjustment_def = NULL_TREE;
            if (reduction_type != COND_REDUCTION
                && reduction_type != EXTRACT_LAST_REDUCTION)
              {
                init_def = vect_get_vec_def_for_operand (init_val, stmt);
                break;
              }
          }
        init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
        init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
      }
      break;

    default:
      gcc_unreachable ();
    }

  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  return init_def;
}
/* Get at the initial defs for the reduction PHIs in SLP_NODE.
   NUMBER_OF_VECTORS is the number of vector defs to create.
   If NEUTRAL_OP is nonnull, introducing extra elements of that
   value will not change the result.  */

static void
get_initial_defs_for_reduction (slp_tree slp_node,
                                vec<tree> *vec_oprnds,
                                unsigned int number_of_vectors,
                                bool reduc_chain, tree neutral_op)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned HOST_WIDE_INT nunits;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  struct loop *loop;
  auto_vec<tree, 16> permute_results;

  vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);

  loop = (gimple_bb (stmt))->loop_father;
  edge pe = loop_preheader_edge (loop);

  gcc_assert (!reduc_chain || neutral_op);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2} (NUMBER_OF_COPIES
     will be 2).

     If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
     vectors containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */
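
  /* Spelling out the arithmetic for the first example (a sketch with
     NUNITS = 4, GROUP_SIZE = 2 and NUMBER_OF_VECTORS = 1):
       NUMBER_OF_COPIES = NUNITS * NUMBER_OF_VECTORS / GROUP_SIZE
                        = 4 * 1 / 2 = 2,
     which yields the single initial vector {s1, s2, s1, s2}.  */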
  if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
    nunits = group_size;

  number_of_copies = nunits * number_of_vectors / group_size;

  number_of_places_left_in_vector = nunits;
  bool constant_p = true;
  tree_vector_builder elts (vector_type, nunits, 1);
  elts.quick_grow (nunits);
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
        {
          tree op;
          /* Get the def before the loop.  In reduction chain we have only
             one initial value.  */
          if ((j != (number_of_copies - 1)
               || (reduc_chain && i != 0))
              && neutral_op)
            op = neutral_op;
          else
            op = PHI_ARG_DEF_FROM_EDGE (stmt, pe);

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          number_of_places_left_in_vector--;
          elts[number_of_places_left_in_vector] = op;
          if (!CONSTANT_CLASS_P (op))
            constant_p = false;

          if (number_of_places_left_in_vector == 0)
            {
              gimple_seq ctor_seq = NULL;
              tree init;
              if (constant_p && !neutral_op
                  ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
                  : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
                /* Build the vector directly from ELTS.  */
                init = gimple_build_vector (&ctor_seq, &elts);
              else if (neutral_op)
                {
                  /* Build a vector of the neutral value and shift the
                     other elements into place.  */
                  init = gimple_build_vector_from_val (&ctor_seq, vector_type,
                                                       neutral_op);
                  int k = nunits;
                  while (k > 0 && elts[k - 1] == neutral_op)
                    k -= 1;
                  while (k > 0)
                    {
                      k -= 1;
                      init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
                                           vector_type, init, elts[k]);
                    }
                }
              else
                {
                  /* First time round, duplicate ELTS to fill the
                     required number of vectors, then cherry pick the
                     appropriate result for each iteration.  */
                  if (vec_oprnds->is_empty ())
                    duplicate_and_interleave (&ctor_seq, vector_type, elts,
                                              number_of_vectors,
                                              permute_results);
                  init = permute_results[number_of_vectors - j - 1];
                }
              if (ctor_seq != NULL)
                gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
              voprnds.quick_push (init);

              number_of_places_left_in_vector = nunits;
              elts.new_vector (vector_type, nunits, 1);
              elts.quick_grow (nunits);
              constant_p = true;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  tree neutral_vec = NULL;
  while (number_of_vectors > vec_oprnds->length ())
    {
      if (neutral_op)
        {
          if (!neutral_vec)
            {
              gimple_seq ctor_seq = NULL;
              neutral_vec = gimple_build_vector_from_val
                (&ctor_seq, vector_type, neutral_op);
              if (ctor_seq != NULL)
                gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
            }
          vec_oprnds->quick_push (neutral_vec);
        }
      else
        {
          for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
            vec_oprnds->quick_push (vop);
        }
    }
}
/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
     reduction statements.
   STMT is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_FN is the internal function for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements.  The
     first one in this group is STMT.
   INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
     when the COND_EXPR is never true in the loop.  For MAX_EXPR, it needs to
     be smaller than any value of the IV in the loop, for MIN_EXPR larger than
     any value of the IV in the loop.
   INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
   NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
     null if this is not an SLP reduction.

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by calling the function specified by REDUC_FN if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

   The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

   The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */
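
/* Concretely (a sketch for a 4-lane integer add reduction): v_out2 holds
   v_out1[0] + v_out1[1] + v_out1[2] + v_out1[3], s_out3 is that scalar
   extracted from lane 0, and s_out4 adds back ADJUSTMENT_DEF when the
   'adjust in epilog' initialization scheme was used.  */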
static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
                                  gimple *reduc_def_stmt,
                                  int ncopies, internal_fn reduc_fn,
                                  vec<gimple *> reduction_phis,
                                  bool double_reduc,
                                  slp_tree slp_node,
                                  slp_instance slp_node_instance,
                                  tree induc_val, enum tree_code induc_code,
                                  tree neutral_op)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple *new_phi = NULL, *phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
  gimple *epilog_stmt = NULL;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  gimple *exit_phi;
  tree bitsize;
  tree adjustment_def = NULL;
  tree vec_initial_def = NULL;
  tree expr, def, initial_def = NULL;
  tree orig_name, scalar_result;
  imm_use_iterator imm_iter, phi_imm_iter;
  use_operand_p use_p, phi_use_p;
  gimple *use_stmt, *reduction_phi = NULL;
  bool nested_in_vect_loop = false;
  auto_vec<gimple *> new_phis;
  auto_vec<gimple *> inner_phis;
  enum vect_def_type dt = vect_unknown_def_type;
  int j, i;
  auto_vec<tree> scalar_results;
  unsigned int group_size = 1, k, ratio;
  auto_vec<tree> vec_initial_defs;
  auto_vec<gimple *> phis;
  bool slp_reduc = false;
  bool direct_slp_reduc;
  tree new_phi_result;
  gimple *inner_phi = NULL;
  tree induction_index = NULL_TREE;

  if (slp_node)
    group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_in_vect_loop = true;
      gcc_assert (!slp_node);
    }

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);

  /* 1. Create the reduction def-use cycle:
     Set the arguments of REDUCTION_PHIS, i.e., transform

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     (in case of SLP, do it for all the phis).  */

  /* Get the loop-entry arguments.  */
  enum vect_def_type initial_def_dt = vect_unknown_def_type;
  if (slp_node)
    {
      unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      vec_initial_defs.reserve (vec_num);
      get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
                                      &vec_initial_defs, vec_num,
                                      REDUC_GROUP_FIRST_ELEMENT (stmt_info),
                                      neutral_op);
    }
  else
    {
      /* Get at the scalar def before the loop, that defines the initial value
         of the reduction variable.  */
      initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
                                           loop_preheader_edge (loop));
      /* Optimize: if initial_def is for REDUC_MAX smaller than the base
         and we can't use zero for induc_val, use initial_def.  Similarly
         for REDUC_MIN and initial_def larger than the base.  */
      if (TREE_CODE (initial_def) == INTEGER_CST
          && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
              == INTEGER_INDUC_COND_REDUCTION)
          && !integer_zerop (induc_val)
          && ((induc_code == MAX_EXPR
               && tree_int_cst_lt (initial_def, induc_val))
              || (induc_code == MIN_EXPR
                  && tree_int_cst_lt (induc_val, initial_def))))
        induc_val = initial_def;

      if (double_reduc)
        /* In case of double reduction we only create a vector variable
           to be put in the reduction phi node.  The actual statement
           creation is done later in this function.  */
        vec_initial_def = vect_create_destination_var (initial_def, vectype);
      else if (nested_in_vect_loop)
        {
          /* Do not use an adjustment def as that case is not supported
             correctly if ncopies is not one.  */
          vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
          vec_initial_def = vect_get_vec_def_for_operand (initial_def, stmt);
        }
      else
        vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
                                                         &adjustment_def);
      vec_initial_defs.create (1);
      vec_initial_defs.quick_push (vec_initial_def);
    }

  /* Set phi nodes arguments.  */
  FOR_EACH_VEC_ELT (reduction_phis, i, phi)
    {
      tree vec_init_def = vec_initial_defs[i];
      tree def = vect_defs[i];
      for (j = 0; j < ncopies; j++)
        {
          if (j != 0)
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              if (nested_in_vect_loop)
                vec_init_def
                  = vect_get_vec_def_for_stmt_copy (initial_def_dt,
                                                    vec_init_def);
            }

          /* Set the loop-entry arg of the reduction-phi.  */

          if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
              == INTEGER_INDUC_COND_REDUCTION)
            {
              /* Initialise the reduction phi to zero.  This prevents initial
                 values of non-zero interferring with the reduction op.  */
              gcc_assert (ncopies == 1);
              gcc_assert (i == 0);

              tree vec_init_def_type = TREE_TYPE (vec_init_def);
              tree induc_val_vec
                = build_vector_from_val (vec_init_def_type, induc_val);

              add_phi_arg (as_a <gphi *> (phi), induc_val_vec,
                           loop_preheader_edge (loop), UNKNOWN_LOCATION);
            }
          else
            add_phi_arg (as_a <gphi *> (phi), vec_init_def,
                         loop_preheader_edge (loop), UNKNOWN_LOCATION);

          /* Set the loop-latch arg for the reduction-phi.  */
          if (j > 0)
            def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);

          add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
                       UNKNOWN_LOCATION);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "transform reduction: created def-use cycle: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
            }
        }
    }
  /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
     which is updated with the current index of the loop for every match of
     the original loop's cond_expr (VEC_STMT).  This results in a vector
     containing the last time the condition passed for that vector lane.
     The first match will be a 1 to allow 0 to be used for non-matching
     indexes.  If there are no matches at all then the vector will be all
     zeroes.  */
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    {
      tree indx_before_incr, indx_after_incr;
      poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);

      gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);

      int scalar_precision
        = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
      tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
      tree cr_index_vector_type = build_vector_type
        (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we create a simple vector induction variable which starts
         with the values {1,2,3,...} (SERIES_VECT) and increments by the
         vector size (STEP).  */

      /* Create a {1,2,3,...} vector.  */
      tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);

      /* Create a vector of the step value.  */
      tree step = build_int_cst (cr_index_scalar_type, nunits_out);
      tree vec_step = build_vector_from_val (cr_index_vector_type, step);

      /* Create an induction variable.  */
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      standard_iv_increment_position (loop, &incr_gsi, &insert_after);
      create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
                 insert_after, &indx_before_incr, &indx_after_incr);

      /* Next create a new phi node vector (NEW_PHI_TREE) which starts
         filled with zeros (VEC_ZERO).  */

      /* Create a vector of 0s.  */
      tree zero = build_zero_cst (cr_index_scalar_type);
      tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);

      /* Create a vector phi node.  */
      tree new_phi_tree = make_ssa_name (cr_index_vector_type);
      new_phi = create_phi_node (new_phi_tree, loop->header);
      loop_vinfo->add_stmt (new_phi);
      add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
                   loop_preheader_edge (loop), UNKNOWN_LOCATION);

      /* Now take the condition from the loops original cond_expr
         (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
         every match uses values from the induction variable
         (INDEX_BEFORE_INCR) otherwise uses values from the phi node
         (NEW_PHI_TREE).
         Finally, we update the phi (NEW_PHI_TREE) to take the value of
         the new cond_expr (INDEX_COND_EXPR).  */

      /* Duplicate the condition from vec_stmt.  */
      tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));

      /* Create a conditional, where the condition is taken from vec_stmt
         (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
         else is the phi (NEW_PHI_TREE).  */
      tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
                                     ccompare, indx_before_incr,
                                     new_phi_tree);
      induction_index = make_ssa_name (cr_index_vector_type);
      gimple *index_condition = gimple_build_assign (induction_index,
                                                     index_cond_expr);
      gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
      stmt_vec_info index_vec_info = loop_vinfo->add_stmt (index_condition);
      STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;

      /* Update the phi with the vec cond.  */
      add_phi_arg (as_a <gphi *> (new_phi), induction_index,
                   loop_latch_edge (loop), UNKNOWN_LOCATION);
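
      /* To illustrate (a sketch with a 4-lane vector and two vector
         iterations): the IV yields {1,2,3,4} and then {5,6,7,8}.  If the
         condition matches lane 2 in the first iteration and lane 1 in the
         second, the index vector ends up as {0,6,3,0}; a lane that never
         matched keeps 0.  */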
    }

  /* 2. Create epilog code.
        The reduction epilog code operates across the elements of the vector
        of partial results computed by the vectorized loop.
        The reduction epilog code consists of:

        step 1: compute the scalar result in a vector (v_out2)
        step 2: extract the scalar result (s_out3) from the vector (v_out2)
        step 3: adjust the scalar result (s_out3) if needed.

        Step 1 can be accomplished using one of the following three schemes:
          (scheme 1) using reduc_fn, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop.  In this case steps 1+2 above are
                     combined.

        The overall epilog code looks like this:

          s_out0 = phi <s_loop>                 # original EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>              # step 1
          s_out3 = extract_field <v_out2, 0>    # step 2
          s_out4 = adjust_result <s_out3>       # step 3

        (step 3 is optional, and steps 1 and 2 may be combined).
        Lastly, the uses of s_out0 are replaced by s_out4.  */


  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
         v_out1 = phi <VECT_DEF>
         Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis.create (vect_defs.length ());
  FOR_EACH_VEC_ELT (vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
        {
          tree new_def = copy_ssa_name (def);
          phi = create_phi_node (new_def, exit_bb);
          stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
          if (j == 0)
            new_phis.quick_push (phi);
          else
            {
              def = vect_get_vec_def_for_stmt_copy (dt, def);
              STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
            }

          SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
          prev_phi_info = phi_info;
        }
    }

  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  Create exit phis for the outer loop.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
      inner_phis.create (vect_defs.length ());
      FOR_EACH_VEC_ELT (new_phis, i, phi)
        {
          tree new_result = copy_ssa_name (PHI_RESULT (phi));
          gphi *outer_phi = create_phi_node (new_result, exit_bb);
          SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                           PHI_RESULT (phi));
          prev_phi_info = loop_vinfo->add_stmt (outer_phi);
          inner_phis.quick_push (phi);
          new_phis[i] = outer_phi;
          while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              new_result = copy_ssa_name (PHI_RESULT (phi));
              outer_phi = create_phi_node (new_result, exit_bb);
              SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                               PHI_RESULT (phi));
              stmt_vec_info outer_phi_info = loop_vinfo->add_stmt (outer_phi);
              STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi_info;
              prev_phi_info = outer_phi_info;
            }
        }
    }

  exit_gsi = gsi_after_labels (exit_bb);

  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_fn is not available) and in the final adjustment
         code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt_info)
    {
      /* Regular reduction.  */
      orig_stmt_info = stmt_info;
    }
  else
    {
      /* Reduction pattern.  */
      gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
      gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
    }

  code = gimple_assign_rhs_code (orig_stmt_info->stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

  scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results.create (group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be used
     in the vectorized outer-loop, or reduced to a scalar result at the end of
     the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;
  /* SLP reduction without reduction chain, e.g.,
     # a1 = phi <a2, a0>
     # b1 = phi <b2, b0>
     a2 = operation (a1)
     b2 = operation (b1)  */
  slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

  /* True if we should implement SLP_REDUC using native reduction operations
     instead of scalar operations.  */
  direct_slp_reduc = (reduc_fn != IFN_LAST
                      && slp_reduc
                      && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());

  /* In case of reduction chain, e.g.,
     # a1 = phi <a3, a0>
     a2 = operation (a1)
     a3 = operation (a2),

     we may end up with more than one vector result.  Here we reduce them to
     one vector.  */
  if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
    {
      tree first_vect = PHI_RESULT (new_phis[0]);
      gassign *new_vec_stmt = NULL;
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      for (k = 1; k < new_phis.length (); k++)
        {
          gimple *next_phi = new_phis[k];
          tree second_vect = PHI_RESULT (next_phi);
          tree tem = make_ssa_name (vec_dest, new_vec_stmt);
          new_vec_stmt = gimple_build_assign (tem, code,
                                              first_vect, second_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
          first_vect = tem;
        }

      new_phi_result = first_vect;
      if (new_vec_stmt)
        {
          new_phis.truncate (0);
          new_phis.safe_push (new_vec_stmt);
        }
    }
  /* Likewise if we couldn't use a single defuse cycle.  */
  else if (ncopies > 1)
    {
      gcc_assert (new_phis.length () == 1);
      tree first_vect = PHI_RESULT (new_phis[0]);
      gassign *new_vec_stmt = NULL;
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      gimple *next_phi = new_phis[0];
      for (int k = 1; k < ncopies; ++k)
        {
          next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi));
          tree second_vect = PHI_RESULT (next_phi);
          tree tem = make_ssa_name (vec_dest, new_vec_stmt);
          new_vec_stmt = gimple_build_assign (tem, code,
                                              first_vect, second_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
          first_vect = tem;
        }

      new_phi_result = first_vect;
      new_phis.truncate (0);
      new_phis.safe_push (new_vec_stmt);
    }
  else
    new_phi_result = PHI_RESULT (new_phis[0]);
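
  /* A sketch of the combining just above: with ncopies == 4 the four
     vector partial results are folded with three vector CODE statements,
     e.g. for an add reduction t1 = v0 + v1; t2 = t1 + v2; and finally
     new_phi_result = t2 + v3.  */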
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
      && reduc_fn != IFN_LAST)
    {
      /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
         various data values where the condition matched and another vector
         (INDUCTION_INDEX) containing all the indexes of those matches.  We
         need to extract the last matching index (which will be the index with
         highest value) and use this to index into the data vector.
         For the case where there were no matches, the data vector will contain
         all default values and the index vector will be all zeros.  */

      /* Get various versions of the type of the vector of indexes.  */
      tree index_vec_type = TREE_TYPE (induction_index);
      gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
      tree index_scalar_type = TREE_TYPE (index_vec_type);
      tree index_vec_cmp_type = build_same_sized_truth_vector_type
        (index_vec_type);

      /* Get an unsigned integer version of the type of the data vector.  */
      int scalar_precision
        = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
      tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
      tree vectype_unsigned = build_vector_type
        (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we need to create a vector (ZERO_VEC) of zeros and another
         vector (MAX_INDEX_VEC) filled with the last matching index, which we
         can create using a MAX reduction and then expanding.
         In the case where the loop never made any matches, the max index will
         be zero.  */

      /* Vector of {0, 0, 0,...}.  */
      tree zero_vec = make_ssa_name (vectype);
      tree zero_vec_rhs = build_zero_cst (vectype);
      gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
      gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);

      /* Find maximum value from the vector of found indexes.  */
      tree max_index = make_ssa_name (index_scalar_type);
      gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
                                                          1, induction_index);
      gimple_call_set_lhs (max_index_stmt, max_index);
      gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);

      /* Vector of {max_index, max_index, max_index,...}.  */
      tree max_index_vec = make_ssa_name (index_vec_type);
      tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
                                                      max_index);
      gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
                                                        max_index_vec_rhs);
      gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);

      /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
         with the vector (INDUCTION_INDEX) of found indexes, choosing values
         from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
         otherwise.  Only one value should match, resulting in a vector
         (VEC_COND) with one data value and the rest zeros.
         In the case where the loop never made any matches, every index will
         match, resulting in a vector with all data values (which will all be
         the default value).  */

      /* Compare the max index vector to the vector of found indexes to find
         the position of the max value.  */
      tree vec_compare = make_ssa_name (index_vec_cmp_type);
      gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
                                                      induction_index,
                                                      max_index_vec);
      gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);

      /* Use the compare to choose either values from the data vector or
         zero.  */
      tree vec_cond = make_ssa_name (vectype);
      gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
                                                   vec_compare, new_phi_result,
                                                   zero_vec);
      gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);

      /* Finally we need to extract the data value from the vector (VEC_COND)
         into a scalar (MATCHED_DATA_REDUC).  Logically we want to do a OR
         reduction, but because this doesn't exist, we can use a MAX reduction
         instead.  The data value might be signed or a float so we need to cast
         it first.
         In the case where the loop never made any matches, the data values are
         all identical, and so will reduce down correctly.  */

      /* Make the matched data values unsigned.  */
      tree vec_cond_cast = make_ssa_name (vectype_unsigned);
      tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
                                       vec_cond);
      gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
                                                        vec_cond_cast_rhs);
      gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);

      /* Reduce down to a scalar value.  */
      tree data_reduc = make_ssa_name (scalar_type_unsigned);
      gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
                                                           1, vec_cond_cast);
      gimple_call_set_lhs (data_reduc_stmt, data_reduc);
      gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);

      /* Convert the reduced value back to the result type and set as the
         result.  */
      gimple_seq stmts = NULL;
      new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
                               data_reduc);
      gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
      scalar_results.safe_push (new_temp);
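
      /* Continuing the earlier sketch: with INDUCTION_INDEX = {0,6,3,0}
         and NEW_PHI_RESULT = {d0,d1,d2,d3}, IFN_REDUC_MAX gives
         max_index = 6, the EQ compare selects lane 1, VEC_COND becomes
         {0,d1,0,0}, and the final unsigned MAX reduction yields d1 - the
         data value of the last match.  */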
    }
  else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
           && reduc_fn == IFN_LAST)
    {
      /* Condition reduction without supported IFN_REDUC_MAX.  Generate
         idx = 0;
         idx_val = induction_index[0];
         val = data_reduc[0];
         for (idx = 0, val = init, i = 0; i < nelts; ++i)
           if (induction_index[i] > idx_val)
             val = data_reduc[i], idx_val = induction_index[i];
         return val;  */

      tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
      tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
      unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
      poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
      /* Enforced by vectorizable_reduction, which ensures we have target
         support before allowing a conditional reduction on variable-length
         vectors.  */
      unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
      tree idx_val = NULL_TREE, val = NULL_TREE;
      for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
        {
          tree old_idx_val = idx_val;
          tree old_val = val;
          idx_val = make_ssa_name (idx_eltype);
          epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
                                             build3 (BIT_FIELD_REF, idx_eltype,
                                                     induction_index,
                                                     bitsize_int (el_size),
                                                     bitsize_int (off)));
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          val = make_ssa_name (data_eltype);
          epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
                                             build3 (BIT_FIELD_REF,
                                                     data_eltype,
                                                     new_phi_result,
                                                     bitsize_int (el_size),
                                                     bitsize_int (off)));
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          if (off != 0)
            {
              tree new_idx_val = idx_val;
              tree new_val = val;
              if (off != v_size - el_size)
                {
                  new_idx_val = make_ssa_name (idx_eltype);
                  epilog_stmt = gimple_build_assign (new_idx_val,
                                                     MAX_EXPR, idx_val,
                                                     old_idx_val);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
                }
              new_val = make_ssa_name (data_eltype);
              epilog_stmt = gimple_build_assign (new_val,
                                                 COND_EXPR,
                                                 build2 (GT_EXPR,
                                                         boolean_type_node,
                                                         idx_val,
                                                         old_idx_val),
                                                 val, old_val);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              idx_val = new_idx_val;
              val = new_val;
            }
        }
      /* Convert the reduced value back to the result type and set as the
         result.  */
      gimple_seq stmts = NULL;
      val = gimple_convert (&stmts, scalar_type, val);
      gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
      scalar_results.safe_push (val);
    }

  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  In SLP we simply need to extract all the elements from the
         vector (without reducing them), so we use scalar shifts.  */
  else if (reduc_fn != IFN_LAST && !slp_reduc)
    {
      tree tmp;
      tree vec_elem_type;

      /* Case 1:  Create:
         v_out2 = reduc_expr <v_out1>  */

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Reduce using direct vector reduction.\n");

      vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
      if (!useless_type_conversion_p (scalar_type, vec_elem_type))
        {
          tree tmp_dest
            = vect_create_destination_var (scalar_dest, vec_elem_type);
          epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
                                                    new_phi_result);
          gimple_set_lhs (epilog_stmt, tmp_dest);
          new_temp = make_ssa_name (tmp_dest, epilog_stmt);
          gimple_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

          epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
                                             new_temp);
        }
      else
        {
          epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
                                                    new_phi_result);
          gimple_set_lhs (epilog_stmt, new_scalar_dest);
        }

      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
           == INTEGER_INDUC_COND_REDUCTION)
          && !operand_equal_p (initial_def, induc_val, 0))
        {
          /* Earlier we set the initial value to be a vector of induc_val
             values.  Check the result and if it is induc_val then replace
             with the original initial value, unless induc_val is
             the same as initial_def already.  */
          tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
                                  induc_val);

          tmp = make_ssa_name (new_scalar_dest);
          epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
                                             initial_def, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          new_temp = tmp;
        }

      scalar_results.safe_push (new_temp);
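
      /* In GIMPLE terms this branch boils down to a sketch like
           _tmp = .REDUC_PLUS (v_out1);
           s_out3 = (scalar_type) _tmp;
         for an add reduction (reduc_fn == IFN_REDUC_PLUS): the whole
         vector is collapsed by one internal-function call, plus a
         conversion when the vector element type differs from the scalar
         result type.  */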
    }
  else if (direct_slp_reduc)
    {
      /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
         with the elements for other SLP statements replaced with the
         neutral value.  We can then do a normal reduction on each vector.  */

      /* Enforced by vectorizable_reduction.  */
      gcc_assert (new_phis.length () == 1);
      gcc_assert (pow2p_hwi (group_size));

      slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
      vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
      gimple_seq seq = NULL;

      /* Build a vector {0, 1, 2, ...}, with the same number of elements
         and the same element size as VECTYPE.  */
      tree index = build_index_vector (vectype, 0, 1);
      tree index_type = TREE_TYPE (index);
      tree index_elt_type = TREE_TYPE (index_type);
      tree mask_type = build_same_sized_truth_vector_type (index_type);

      /* Create a vector that, for each element, identifies which of
         the REDUC_GROUP_SIZE results should use it.  */
      tree index_mask = build_int_cst (index_elt_type, group_size - 1);
      index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
                            build_vector_from_val (index_type, index_mask));

      /* Get a neutral vector value.  This is simply a splat of the neutral
         scalar value if we have one, otherwise the initial scalar value
         is itself a neutral value.  */
      tree vector_identity = NULL_TREE;
      if (neutral_op)
        vector_identity = gimple_build_vector_from_val (&seq, vectype,
                                                        neutral_op);
      for (unsigned int i = 0; i < group_size; ++i)
        {
          /* If there's no universal neutral value, we can use the
             initial scalar value from the original PHI.  This is used
             for MIN and MAX reduction, for example.  */
          if (!neutral_op)
            {
              tree scalar_value
                = PHI_ARG_DEF_FROM_EDGE (orig_phis[i],
                                         loop_preheader_edge (loop));
              vector_identity = gimple_build_vector_from_val (&seq, vectype,
                                                              scalar_value);
            }

          /* Calculate the equivalent of:

             sel[j] = (index[j] == i);

             which selects the elements of NEW_PHI_RESULT that should
             be included in the result.  */
          tree compare_val = build_int_cst (index_elt_type, i);
          compare_val = build_vector_from_val (index_type, compare_val);
          tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
                                   index, compare_val);

          /* Calculate the equivalent of:

             vec = seq ? new_phi_result : vector_identity;

             VEC is now suitable for a full vector reduction.  */
          tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
                                   sel, new_phi_result, vector_identity);

          /* Do the reduction and convert it to the appropriate type.  */
          tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
                                      TREE_TYPE (vectype), vec);
          scalar = gimple_convert (&seq, scalar_type, scalar);
          scalar_results.safe_push (scalar);
        }
      gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
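
      /* For example (a sketch with REDUC_GROUP_SIZE = 2 and a 4-lane
         vector): index & 1 gives {0,1,0,1}, so for i = 0 the even lanes of
         NEW_PHI_RESULT are kept and the odd lanes are replaced by the
         identity, and vice versa for i = 1; each masked vector is then
         reduced with REDUC_FN.  */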
    }
  else
    {
      bool reduce_with_shift;
      tree vec_temp;

      /* COND reductions all do the final reduction with MAX_EXPR
         or MIN_EXPR.  */
      if (code == COND_EXPR)
        {
          if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
              == INTEGER_INDUC_COND_REDUCTION)
            code = induc_code;
          else
            code = MAX_EXPR;
        }

      /* See if the target wants to do the final (shift) reduction
         in a vector mode of smaller size and first reduce upper/lower
         halves against each other.  */
      enum machine_mode mode1 = mode;
      tree vectype1 = vectype;
      unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
      unsigned sz1 = sz;
      if (!slp_reduc
          && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
        sz1 = GET_MODE_SIZE (mode1).to_constant ();

      vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
      reduce_with_shift = have_whole_vector_shift (mode1);
      if (!VECTOR_MODE_P (mode1))
        reduce_with_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype1, optab_default);
          if (optab_handler (optab, mode1) == CODE_FOR_nothing)
            reduce_with_shift = false;
        }

      /* First reduce the vector to the desired vector size we should
         do shift reduction on by combining upper and lower halves.  */
      new_temp = new_phi_result;
      while (sz > sz1)
        {
          gcc_assert (!slp_reduc);
          sz /= 2;
          vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);

          /* The target has to make sure we support lowpart/highpart
             extraction, either via direct vector extract or through
             an integer mode punning.  */
          tree dst1, dst2;
          if (convert_optab_handler (vec_extract_optab,
                                     TYPE_MODE (TREE_TYPE (new_temp)),
                                     TYPE_MODE (vectype1))
              != CODE_FOR_nothing)
            {
              /* Extract sub-vectors directly once vec_extract becomes
                 a conversion optab.  */
              dst1 = make_ssa_name (vectype1);
              epilog_stmt
                = gimple_build_assign (dst1, BIT_FIELD_REF,
                                       build3 (BIT_FIELD_REF, vectype1,
                                               new_temp, TYPE_SIZE (vectype1),
                                               bitsize_int (0)));
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              dst2 = make_ssa_name (vectype1);
              epilog_stmt
                = gimple_build_assign (dst2, BIT_FIELD_REF,
                                       build3 (BIT_FIELD_REF, vectype1,
                                               new_temp, TYPE_SIZE (vectype1),
                                               bitsize_int (sz * BITS_PER_UNIT)));
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }
          else
            {
              /* Extract via punning to appropriately sized integer mode
                 vector.  */
              tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
                                                            1);
              tree etype = build_vector_type (eltype, 2);
              gcc_assert (convert_optab_handler (vec_extract_optab,
                                                 TYPE_MODE (etype),
                                                 TYPE_MODE (eltype))
                          != CODE_FOR_nothing);
              tree tem = make_ssa_name (etype);
              epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
                                                 build1 (VIEW_CONVERT_EXPR,
                                                         etype, new_temp));
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              new_temp = tem;
              tem = make_ssa_name (eltype);
              epilog_stmt
                = gimple_build_assign (tem, BIT_FIELD_REF,
                                       build3 (BIT_FIELD_REF, eltype,
                                               new_temp, TYPE_SIZE (eltype),
                                               bitsize_int (0)));
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              dst1 = make_ssa_name (vectype1);
              epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
                                                 build1 (VIEW_CONVERT_EXPR,
                                                         vectype1, tem));
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              tem = make_ssa_name (eltype);
              epilog_stmt
                = gimple_build_assign (tem, BIT_FIELD_REF,
                                       build3 (BIT_FIELD_REF, eltype,
                                               new_temp, TYPE_SIZE (eltype),
                                               bitsize_int (sz * BITS_PER_UNIT)));
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              dst2 = make_ssa_name (vectype1);
              epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
                                                 build1 (VIEW_CONVERT_EXPR,
                                                         vectype1, tem));
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          new_temp = make_ssa_name (vectype1);
          epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
        }

      if (reduce_with_shift && !slp_reduc)
        {
          int element_bitsize = tree_to_uhwi (bitsize);
          /* Enforced by vectorizable_reduction, which disallows SLP reductions
             for variable-length vectors and also requires direct target support
             for loop reductions.  */
          int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
          int nelements = vec_size_in_bits / element_bitsize;
          vec_perm_builder sel;
          vec_perm_indices indices;

          int elt_offset;

          tree zero_vec = build_zero_cst (vectype1);
          /* Case 2: Create:
             for (offset = nelements/2; offset >= 1; offset/=2)
               {
                 Create:  va' = vec_shift <va, offset>
                 Create:  va = vop <va, va'>
               }  */

          tree rhs;

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using vector shifts\n");

          mode1 = TYPE_MODE (vectype1);
          vec_dest = vect_create_destination_var (scalar_dest, vectype1);
          for (elt_offset = nelements / 2;
               elt_offset >= 1;
               elt_offset /= 2)
            {
              calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
              indices.new_vector (sel, 2, nelements);
              tree mask = vect_gen_perm_mask_any (vectype1, indices);
              epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
                                                 new_temp, zero_vec, mask);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
                                                 new_temp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          /* 2.4  Extract the final scalar result.  Create:
             s_out3 = extract_field <v_out2, bitpos>  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "extract scalar result\n");

          rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
                        bitsize, bitsize_zero_node);
          epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
          new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
          gimple_assign_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          scalar_results.safe_push (new_temp);
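
          /* A sketch for a 4-element add reduction with v_out1 = {a,b,c,d}:
               shift by 2 lanes: {c,d,0,0};  add: {a+c, b+d, c, d}
               shift by 1 lane:  {b+d, c, d, 0};  add: {a+b+c+d, ...}
             and the scalar result is the BIT_FIELD_REF of lane 0.  */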
        }
      else
        {
          /* Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size;)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>  // For non SLP cases
               }  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using scalar code.\n");

          int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
          int element_bitsize = tree_to_uhwi (bitsize);
          FOR_EACH_VEC_ELT (new_phis, i, new_phi)
            {
              int bit_offset;
              if (gimple_code (new_phi) == GIMPLE_PHI)
                vec_temp = PHI_RESULT (new_phi);
              else
                vec_temp = gimple_assign_lhs (new_phi);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                                 bitsize_zero_node);
              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              /* In SLP we don't need to apply reduction operation, so we just
                 collect s' values in SCALAR_RESULTS.  */
              if (slp_reduc)
                scalar_results.safe_push (new_temp);

              for (bit_offset = element_bitsize;
                   bit_offset < vec_size_in_bits;
                   bit_offset += element_bitsize)
                {
                  tree bitpos = bitsize_int (bit_offset);
                  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                     bitsize, bitpos);

                  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
                  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
                  gimple_assign_set_lhs (epilog_stmt, new_name);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

                  if (slp_reduc)
                    {
                      /* In SLP we don't need to apply reduction operation, so
                         we just collect s' values in SCALAR_RESULTS.  */
                      new_temp = new_name;
                      scalar_results.safe_push (new_name);
                    }
                  else
                    {
                      epilog_stmt = gimple_build_assign (new_scalar_dest, code,
                                                         new_name, new_temp);
                      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
                      gimple_assign_set_lhs (epilog_stmt, new_temp);
                      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
                    }
                }
            }

          /* The only case where we need to reduce scalar results in SLP, is
             unrolling.  If the size of SCALAR_RESULTS is greater than
             REDUC_GROUP_SIZE, we reduce them combining elements modulo
             REDUC_GROUP_SIZE.  */
          if (slp_reduc)
            {
              tree res, first_res, new_res;
              gimple *new_stmt;

              /* Reduce multiple scalar results in case of SLP unrolling.  */
              for (j = group_size; scalar_results.iterate (j, &res);
                   j++)
                {
                  first_res = scalar_results[j % group_size];
                  new_stmt = gimple_build_assign (new_scalar_dest, code,
                                                  first_res, res);
                  new_res = make_ssa_name (new_scalar_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_res);
                  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
                  scalar_results[j % group_size] = new_res;
                }
            }
          else
            /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
            scalar_results.safe_push (new_temp);
        }
      if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
           == INTEGER_INDUC_COND_REDUCTION)
          && !operand_equal_p (initial_def, induc_val, 0))
        {
          /* Earlier we set the initial value to be a vector of induc_val
             values.  Check the result and if it is induc_val then replace
             with the original initial value, unless induc_val is
             the same as initial_def already.  */
          tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
                                  induc_val);

          tree tmp = make_ssa_name (new_scalar_dest);
          epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
                                             initial_def, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          scalar_results[0] = tmp;
        }
    }

vect_finalize_reduction:

  /* 2.5 Adjust the final result by the initial value of the reduction
         variable.  (When such adjustment is not needed, then
         'adjustment_def' is zero).  For example, if code is PLUS we create:
         new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      gcc_assert (!slp_reduc);
      if (nested_in_vect_loop)
        {
          new_phi = new_phis[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          new_temp = scalar_results[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
        {
          stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
          STMT_VINFO_RELATED_STMT (epilog_stmt_info)
            = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));

          if (!double_reduc)
            scalar_results.quick_push (new_temp);
          else
            scalar_results[0] = new_temp;
        }
      else
        scalar_results[0] = new_temp;

      new_phis[0] = epilog_stmt;
    }
5541 /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
5542 phis with new adjusted scalar results, i.e., replace use <s_out0>
5547 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5548 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5549 v_out2 = reduce <v_out1>
5550 s_out3 = extract_field <v_out2, 0>
5551 s_out4 = adjust_result <s_out3>
5558 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
5559 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
5560 v_out2 = reduce <v_out1>
5561 s_out3 = extract_field <v_out2, 0>
5562 s_out4 = adjust_result <s_out3>
5567 /* In SLP reduction chain we reduce vector results into one vector if
5568 necessary, hence we set here REDUC_GROUP_SIZE to 1. SCALAR_DEST is the
5569 LHS of the last stmt in the reduction chain, since we are looking for
5570 the loop exit phi node. */
5571 if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
5573 gimple
*dest_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[group_size
- 1];
5574 /* Handle reduction patterns. */
5575 if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt
)))
5576 dest_stmt
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt
));
5578 scalar_dest
= gimple_assign_lhs (dest_stmt
);
5582 /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
5583 case that REDUC_GROUP_SIZE is greater than vectorization factor).
5584 Therefore, we need to match SCALAR_RESULTS with corresponding statements.
5585 The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
5586 correspond to the first vector stmt, etc.
5587 (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)). */
5588 if (group_size
> new_phis
.length ())
5590 ratio
= group_size
/ new_phis
.length ();
5591 gcc_assert (!(group_size
% new_phis
.length ()));
5596 for (k
= 0; k
< group_size
; k
++)
5600 epilog_stmt
= new_phis
[k
/ ratio
];
5601 reduction_phi
= reduction_phis
[k
/ ratio
];
5603 inner_phi
= inner_phis
[k
/ ratio
];
5608 gimple
*current_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[k
];
5611 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt
));
5612 /* SLP statements can't participate in patterns. */
5613 gcc_assert (!orig_stmt_info
);
5614 scalar_dest
= gimple_assign_lhs (current_stmt
);
5618 /* Find the loop-closed-use at the loop exit of the original scalar
5619 result. (The reduction result is expected to have two immediate uses -
5620 one at the latch block, and one at the loop exit). */
5621 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
5622 if (!flow_bb_inside_loop_p (loop
, gimple_bb (USE_STMT (use_p
)))
5623 && !is_gimple_debug (USE_STMT (use_p
)))
5624 phis
.safe_push (USE_STMT (use_p
));
5626 /* While we expect to have found an exit_phi because of loop-closed-ssa
5627 form we can end up without one if the scalar cycle is dead. */
5629 FOR_EACH_VEC_ELT (phis
, i
, exit_phi
)
5633 stmt_vec_info exit_phi_vinfo
5634 = loop_vinfo
->lookup_stmt (exit_phi
);
5637 /* FORNOW. Currently not supporting the case that an inner-loop
5638 reduction is not used in the outer-loop (but only outside the
5639 outer-loop), unless it is double reduction. */
5640 gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo
)
5641 && !STMT_VINFO_LIVE_P (exit_phi_vinfo
))
5645 STMT_VINFO_VEC_STMT (exit_phi_vinfo
) = inner_phi
;
5647 STMT_VINFO_VEC_STMT (exit_phi_vinfo
) = epilog_stmt
;
5649 || STMT_VINFO_DEF_TYPE (exit_phi_vinfo
)
5650 != vect_double_reduction_def
)
5653 /* Handle double reduction:
5655 stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
5656 stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
5657 stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
5658 stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
5660 At that point the regular reduction (stmt2 and stmt3) is
5661 already vectorized, as well as the exit phi node, stmt4.
5662 Here we vectorize the phi node of double reduction, stmt1, and
5663 update all relevant statements. */
5665 /* Go through all the uses of s2 to find double reduction phi
5666 node, i.e., stmt1 above. */
5667 orig_name = PHI_RESULT (exit_phi);
5668 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5670 stmt_vec_info use_stmt_vinfo;
5671 tree vect_phi_init, preheader_arg, vect_phi_res;
5672 basic_block bb = gimple_bb (use_stmt);
5675 /* Check that USE_STMT is really double reduction phi
5677 if (gimple_code (use_stmt) != GIMPLE_PHI
5678 || gimple_phi_num_args (use_stmt) != 2
5679 || bb->loop_father != outer_loop)
5681 use_stmt_vinfo = loop_vinfo->lookup_stmt (use_stmt);
5683 || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
5684 != vect_double_reduction_def)
5687 /* Create vector phi node for double reduction:
5688 vs1 = phi <vs0, vs2>
5689 vs1 was created previously in this function by a call to
5690 vect_get_vec_def_for_operand and is stored in
5692 vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
5693 vs0 is created here. */
5695 /* Create vector phi node. */
5696 vect_phi = create_phi_node (vec_initial_def, bb);
5697 loop_vec_info_for_loop (outer_loop)->add_stmt (vect_phi);
5699 /* Create vs0 - initial def of the double reduction phi. */
5700 preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
5701 loop_preheader_edge (outer_loop));
5702 vect_phi_init = get_initial_def_for_reduction
5703 (stmt, preheader_arg, NULL);
5705 /* Update phi node arguments with vs0 and vs2. */
5706 add_phi_arg (vect_phi, vect_phi_init,
5707 loop_preheader_edge (outer_loop),
5709 add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
5710 loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
5711 if (dump_enabled_p ())
5713 dump_printf_loc (MSG_NOTE, vect_location,
5714 "created double reduction phi node: ");
5715 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
5718 vect_phi_res = PHI_RESULT (vect_phi);
5720 /* Replace the use, i.e., set the correct vs1 in the regular
5721 reduction phi node. FORNOW, NCOPIES is always 1, so the
5722 loop is redundant. */
5723 use = reduction_phi;
5724 for (j = 0; j < ncopies; j++)
5726 edge pr_edge = loop_preheader_edge (loop);
5727 SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
5728 use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
5735 if (nested_in_vect_loop)
5744 /* Find the loop-closed-use at the loop exit of the original scalar
5745 result. (The reduction result is expected to have two immediate uses,
5746 one at the latch block, and one at the loop exit). For double
5747 reductions we are looking for exit phis of the outer loop. */
5748 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
5750 if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
5752 if (!is_gimple_debug (USE_STMT (use_p)))
5753 phis.safe_push (USE_STMT (use_p));
5757 if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
5759 tree phi_res = PHI_RESULT (USE_STMT (use_p));
5761 FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
5763 if (!flow_bb_inside_loop_p (loop,
5764 gimple_bb (USE_STMT (phi_use_p)))
5765 && !is_gimple_debug (USE_STMT (phi_use_p)))
5766 phis.safe_push (USE_STMT (phi_use_p));
5772 FOR_EACH_VEC_ELT (phis, i, exit_phi)
5774 /* Replace the uses: */
5775 orig_name = PHI_RESULT (exit_phi);
5776 scalar_result = scalar_results[k];
5777 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
5778 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
5779 SET_USE (use_p, scalar_result);
5786 /* Return a vector of type VECTYPE that is equal to the vector select
5787 operation "MASK ? VEC : IDENTITY". Insert the select statements
5791 merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
5792 tree vec, tree identity)
5794 tree cond = make_temp_ssa_name (vectype, NULL, "cond");
5795 gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
5796 mask, vec, identity);
5797 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5801 /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
5802 order, starting with LHS. Insert the extraction statements before GSI and
5803 associate the new scalar SSA names with variable SCALAR_DEST.
5804 Return the SSA name for the result. */
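/* For instance (illustration only, assuming a 4-element vector of
   32-bit elements and CODE == PLUS_EXPR), the sequence built below is
   equivalent to

       s_0 = BIT_FIELD_REF <vector_rhs, 32, 0>;
       lhs_1 = lhs + s_0;
       s_1 = BIT_FIELD_REF <vector_rhs, 32, 32>;
       lhs_2 = lhs_1 + s_1;
       s_2 = BIT_FIELD_REF <vector_rhs, 32, 64>;
       lhs_3 = lhs_2 + s_2;
       s_3 = BIT_FIELD_REF <vector_rhs, 32, 96>;
       lhs_4 = lhs_3 + s_3;

   i.e. a strictly left-to-right fold that preserves the original
   association of the scalar operations. */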
5807 vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
5808 tree_code code, tree lhs, tree vector_rhs)
5810 tree vectype = TREE_TYPE (vector_rhs);
5811 tree scalar_type = TREE_TYPE (vectype);
5812 tree bitsize = TYPE_SIZE (scalar_type);
5813 unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
5814 unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
5816 for (unsigned HOST_WIDE_INT bit_offset = 0;
5817 bit_offset < vec_size_in_bits;
5818 bit_offset += element_bitsize)
5820 tree bitpos = bitsize_int (bit_offset);
5821 tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
5824 gassign *stmt = gimple_build_assign (scalar_dest, rhs);
5825 rhs = make_ssa_name (scalar_dest, stmt);
5826 gimple_assign_set_lhs (stmt, rhs);
5827 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5829 stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
5830 tree new_name = make_ssa_name (scalar_dest, stmt);
5831 gimple_assign_set_lhs (stmt, new_name);
5832 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
5838 /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the
5839 statement that sets the live-out value. REDUC_DEF_STMT is the phi
5840 statement. CODE is the operation performed by STMT and OPS are
5841 its scalar operands. REDUC_INDEX is the index of the operand in
5842 OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
5843 implements in-order reduction, or IFN_LAST if we should open-code it.
5844 VECTYPE_IN is the type of the vector input. MASKS specifies the masks
5845 that should be used to control the operation in a fully-masked loop. */
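/* For example (illustration only), an in-order floating-point
   reduction such as

       double res = init;
       for (int i = 0; i < n; i++)
         res = res + a[i];

   may not be reassociated unless the user allows it (e.g. with
   -ffast-math), so instead of building a tree of partial sums we fold
   each vector of loaded elements into the scalar accumulator in the
   original order, either through REDUC_FN (e.g. IFN_FOLD_LEFT_PLUS)
   or through the open-coded element-by-element expansion in
   vect_expand_fold_left above. */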
5848 vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
5849 gimple **vec_stmt, slp_tree slp_node,
5850 gimple *reduc_def_stmt,
5851 tree_code code, internal_fn reduc_fn,
5852 tree ops[3], tree vectype_in,
5853 int reduc_index, vec_loop_masks *masks)
5855 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5856 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
5857 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
5858 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
5859 stmt_vec_info new_stmt_info = NULL;
5865 ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
5867 gcc_assert (!nested_in_vect_loop_p (loop, stmt));
5868 gcc_assert (ncopies == 1);
5869 gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
5870 gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
5871 gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
5872 == FOLD_LEFT_REDUCTION);
5875 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
5876 TYPE_VECTOR_SUBPARTS (vectype_in)));
5878 tree op0 = ops[1 - reduc_index];
5881 gimple *scalar_dest_def;
5882 auto_vec<tree> vec_oprnds0;
5885 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
5886 group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
5887 scalar_dest_def = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
5891 tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt);
5892 vec_oprnds0.create (1);
5893 vec_oprnds0.quick_push (loop_vec_def0);
5894 scalar_dest_def = stmt;
5897 tree scalar_dest = gimple_assign_lhs (scalar_dest_def);
5898 tree scalar_type = TREE_TYPE (scalar_dest);
5899 tree reduc_var = gimple_phi_result (reduc_def_stmt);
5901 int vec_num = vec_oprnds0.length ();
5902 gcc_assert (vec_num == 1 || slp_node);
5903 tree vec_elem_type = TREE_TYPE (vectype_out);
5904 gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
5906 tree vector_identity = NULL_TREE;
5907 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5908 vector_identity = build_zero_cst (vectype_out);
5910 tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
5913 FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
5916 tree mask = NULL_TREE;
5917 if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
5918 mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
5920 /* Handle MINUS by adding the negative. */
5921 if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
5923 tree negated = make_ssa_name (vectype_out);
5924 new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
5925 gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
5930 def0 = merge_with_identity (gsi, mask, vectype_out, def0,
5933 /* On the first iteration the input is simply the scalar phi
5934 result, and for subsequent iterations it is the output of
5935 the preceding operation. */
5936 if (reduc_fn != IFN_LAST)
5938 new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
5939 /* For chained SLP reductions the output of the previous reduction
5940 operation serves as the input of the next. For the final statement
5941 the output cannot be a temporary - we reuse the original
5942 scalar destination of the last statement. */
5943 if (i != vec_num - 1)
5945 gimple_set_lhs (new_stmt, scalar_dest_var);
5946 reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
5947 gimple_set_lhs (new_stmt, reduc_var);
5952 reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
5954 new_stmt = SSA_NAME_DEF_STMT (reduc_var);
5955 /* Remove the statement, so that we can use the same code paths
5956 as for statements that we've just created. */
5957 gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
5958 gsi_remove (&tmp_gsi, false);
5961 if (i == vec_num - 1)
5963 gimple_set_lhs (new_stmt, scalar_dest);
5964 new_stmt_info = vect_finish_replace_stmt (scalar_dest_def, new_stmt);
5967 new_stmt_info = vect_finish_stmt_generation (scalar_dest_def,
5971 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
5975 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
5980 /* Function is_nonwrapping_integer_induction.
5982 Check if STMT (which is part of loop LOOP) both increments and
5983 does not cause overflow. */
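/* Worked example (numbers are illustrative only): for an induction
   with base 250, step 2 and unsigned char type, ten latch executions
   reach 250 + 2 * 10 = 270, which needs 9 bits of precision, more than
   the 8 bits of the type, so the check below fails; with base 0 and
   step 1 the maximum value 10 fits and the function returns true. */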
5986 is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
5988 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
5989 tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
5990 tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
5991 tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
5992 widest_int ni, max_loop_value, lhs_max;
5993 wi::overflow_type overflow = wi::OVF_NONE;
5995 /* Make sure the loop is integer based. */
5996 if (TREE_CODE (base) != INTEGER_CST
5997 || TREE_CODE (step) != INTEGER_CST)
6000 /* Check that the max size of the loop will not wrap. */
6002 if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
6005 if (! max_stmt_executions (loop, &ni))
6008 max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
6013 max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
6014 TYPE_SIGN (lhs_type), &overflow);
6018 return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
6019 <= TYPE_PRECISION (lhs_type));
6022 /* Function vectorizable_reduction.
6024 Check if STMT performs a reduction operation that can be vectorized.
6025 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
6026 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
6027 Return FALSE if not a vectorizable STMT, TRUE otherwise.
6029 This function also handles reduction idioms (patterns) that have been
6030 recognized in advance during vect_pattern_recog. In this case, STMT may be
6032 X = pattern_expr (arg0, arg1, ..., X)
6033 and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
6034 sequence that had been detected and replaced by the pattern-stmt (STMT).
6036 This function also handles reduction of condition expressions, for example:
6037 for (int i = 0; i < N; i++)
6040 This is handled by vectorising the loop and creating an additional vector
6041 containing the loop indexes for which "a[i] < value" was true. In the
6042 function epilogue this is reduced to a single max value and then used to
6043 index into the vector of results.
6045 In some cases of reduction patterns, the type of the reduction variable X is
6046 different than the type of the other arguments of STMT.
6047 In such cases, the vectype that is used when transforming STMT into a vector
6048 stmt is different than the vectype that is used to determine the
6049 vectorization factor, because it consists of a different number of elements
6050 than the actual number of elements that are being operated upon in parallel.
6052 For example, consider an accumulation of shorts into an int accumulator.
6053 On some targets it's possible to vectorize this pattern operating on 8
6054 shorts at a time (hence, the vectype for purposes of determining the
6055 vectorization factor should be V8HI); on the other hand, the vectype that
6056 is used to create the vector form is actually V4SI (the type of the result).
6058 Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
6059 indicates what is the actual level of parallelism (V8HI in the example), so
6060 that the right vectorization factor would be derived. This vectype
6061 corresponds to the type of arguments to the reduction stmt, and should *NOT*
6062 be used to create the vectorized stmt. The right vectype for the vectorized
6063 stmt is obtained from the type of the result X:
6064 get_vectype_for_scalar_type (TREE_TYPE (X))
6066 This means that, contrary to "regular" reductions (or "regular" stmts in
6067 general), the following equation:
6068 STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
6069 does *NOT* necessarily hold for reduction patterns. */
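/* As a concrete illustration of the short-into-int case discussed
   above (array and variable names are only placeholders):

       short a[N];  int sum = 0;
       for (i = 0; i < N; i++)
         sum += a[i];

   On targets where the widen_sum pattern applies, the arguments are
   shorts, so V8HI determines the vectorization factor and is recorded
   in STMT_VINFO_VECTYPE, while the vectorized statement itself
   produces ints and therefore uses V4SI, obtained from
   get_vectype_for_scalar_type (TREE_TYPE (X)). */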
6072 vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
6073 gimple **vec_stmt, slp_tree slp_node,
6074 slp_instance slp_node_instance,
6075 stmt_vector_for_cost *cost_vec)
6079 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
6080 tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
6081 tree vectype_in = NULL_TREE;
6082 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
6083 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
6084 enum tree_code code, orig_code;
6085 internal_fn reduc_fn;
6086 machine_mode vec_mode;
6089 tree new_temp = NULL_TREE;
6090 enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
6091 gimple *cond_reduc_def_stmt = NULL;
6092 enum tree_code cond_reduc_op_code = ERROR_MARK;
6098 stmt_vec_info prev_stmt_info, prev_phi_info;
6099 bool single_defuse_cycle = false;
6100 stmt_vec_info new_stmt_info = NULL;
6103 enum vect_def_type dts[3];
6104 bool nested_cycle = false, found_nested_cycle_def = false;
6105 bool double_reduc = false;
6107 struct loop * def_stmt_loop;
6109 auto_vec<tree> vec_oprnds0;
6110 auto_vec<tree> vec_oprnds1;
6111 auto_vec<tree> vec_oprnds2;
6112 auto_vec<tree> vect_defs;
6113 auto_vec<gimple *> phis;
6116 tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
6117 tree cond_reduc_val = NULL_TREE;
6119 /* Make sure it was already recognized as a reduction computation. */
6120 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def
6121 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle)
6124 if (nested_in_vect_loop_p (loop, stmt))
6127 nested_cycle = true;
6130 if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
6131 gcc_assert (slp_node && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt);
6133 if (gimple_code (stmt
) == GIMPLE_PHI
)
6135 tree phi_result
= gimple_phi_result (stmt
);
6136 /* Analysis is fully done on the reduction stmt invocation. */
6140 slp_node_instance
->reduc_phis
= slp_node
;
6142 STMT_VINFO_TYPE (stmt_info
) = reduc_vec_info_type
;
6146 if (STMT_VINFO_REDUC_TYPE (stmt_info
) == FOLD_LEFT_REDUCTION
)
6147 /* Leave the scalar phi in place. Note that checking
6148 STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
6149 for reductions involving a single statement. */
6152 gimple
*reduc_stmt
= STMT_VINFO_REDUC_DEF (stmt_info
);
6153 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt
)))
6154 reduc_stmt
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt
));
6156 stmt_vec_info reduc_stmt_info
= vinfo_for_stmt (reduc_stmt
);
6157 if (STMT_VINFO_VEC_REDUCTION_TYPE (reduc_stmt_info
)
6158 == EXTRACT_LAST_REDUCTION
)
6159 /* Leave the scalar phi in place. */
6162 gcc_assert (is_gimple_assign (reduc_stmt
));
6163 for (unsigned k
= 1; k
< gimple_num_ops (reduc_stmt
); ++k
)
6165 tree op
= gimple_op (reduc_stmt
, k
);
6166 if (op
== gimple_phi_result (stmt
))
6169 && gimple_assign_rhs_code (reduc_stmt
) == COND_EXPR
)
6172 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in
)))
6173 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op
)))))
6174 vectype_in
= get_vectype_for_scalar_type (TREE_TYPE (op
));
6177 gcc_assert (vectype_in
);
6182 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
6184 stmt_vec_info use_stmt_info
;
6186 && STMT_VINFO_RELEVANT (reduc_stmt_info
) <= vect_used_only_live
6187 && (use_stmt_info
= loop_vinfo
->lookup_single_use (phi_result
))
6188 && (use_stmt_info
== reduc_stmt_info
6189 || STMT_VINFO_RELATED_STMT (use_stmt_info
) == reduc_stmt
))
6190 single_defuse_cycle
= true;
6192 /* Create the destination vector */
6193 scalar_dest
= gimple_assign_lhs (reduc_stmt
);
6194 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
6197 /* The size vect_schedule_slp_instance computes is off for us. */
6198 vec_num
= vect_get_num_vectors
6199 (LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6200 * SLP_TREE_SCALAR_STMTS (slp_node
).length (),
6205 /* Generate the reduction PHIs upfront. */
6206 prev_phi_info
= NULL
;
6207 for (j
= 0; j
< ncopies
; j
++)
6209 if (j
== 0 || !single_defuse_cycle
)
6211 for (i
= 0; i
< vec_num
; i
++)
6213 /* Create the reduction-phi that defines the reduction
6215 gimple
*new_phi
= create_phi_node (vec_dest
, loop
->header
);
6216 stmt_vec_info new_phi_info
= loop_vinfo
->add_stmt (new_phi
);
6219 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_phi
);
6223 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_phi
;
6225 STMT_VINFO_RELATED_STMT (prev_phi_info
) = new_phi_info
;
6226 prev_phi_info
= new_phi_info
;
6235 /* 1. Is vectorizable reduction? */
6236 /* Not supportable if the reduction variable is used in the loop, unless
6237 it's a reduction chain. */
6238 if (STMT_VINFO_RELEVANT (stmt_info
) > vect_used_in_outer
6239 && !REDUC_GROUP_FIRST_ELEMENT (stmt_info
))
6242 /* Reductions that are not used even in an enclosing outer-loop,
6243 are expected to be "live" (used out of the loop). */
6244 if (STMT_VINFO_RELEVANT (stmt_info
) == vect_unused_in_scope
6245 && !STMT_VINFO_LIVE_P (stmt_info
))
6248 /* 2. Has this been recognized as a reduction pattern?
6250 Check if STMT represents a pattern that has been recognized
6251 in earlier analysis stages. For stmts that represent a pattern,
6252 the STMT_VINFO_RELATED_STMT field records the last stmt in
6253 the original sequence that constitutes the pattern. */
6255 stmt_vec_info orig_stmt_info
= STMT_VINFO_RELATED_STMT (stmt_info
);
6258 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info
));
6259 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info
));
6262 /* 3. Check the operands of the operation. The first operands are defined
6263 inside the loop body. The last operand is the reduction variable,
6264 which is defined by the loop-header-phi. */
6266 gcc_assert (is_gimple_assign (stmt));
6269 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
6271 case GIMPLE_BINARY_RHS:
6272 code = gimple_assign_rhs_code (stmt);
6273 op_type = TREE_CODE_LENGTH (code);
6274 gcc_assert (op_type == binary_op);
6275 ops[0] = gimple_assign_rhs1 (stmt);
6276 ops[1] = gimple_assign_rhs2 (stmt);
6279 case GIMPLE_TERNARY_RHS:
6280 code = gimple_assign_rhs_code (stmt);
6281 op_type = TREE_CODE_LENGTH (code);
6282 gcc_assert (op_type == ternary_op);
6283 ops[0] = gimple_assign_rhs1 (stmt);
6284 ops[1] = gimple_assign_rhs2 (stmt);
6285 ops[2] = gimple_assign_rhs3 (stmt);
6288 case GIMPLE_UNARY_RHS:
6295 if (code == COND_EXPR && slp_node)
6298 scalar_dest = gimple_assign_lhs (stmt);
6299 scalar_type = TREE_TYPE (scalar_dest);
6300 if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
6301 && !SCALAR_FLOAT_TYPE_P (scalar_type))
6304 /* Do not try to vectorize bit-precision reductions. */
6305 if (!type_has_mode_precision_p (scalar_type))
6308 /* All uses but the last are expected to be defined in the loop.
6309 The last use is the reduction variable. In case of nested cycle this
6310 assumption is not true: we use reduc_index to record the index of the
6311 reduction variable. */
6312 gimple *reduc_def_stmt = NULL;
6313 int reduc_index = -1;
6314 for (i = 0; i < op_type; i++)
6316 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
6317 if (i == 0 && code == COND_EXPR)
6320 stmt_vec_info def_stmt_info;
6321 is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &dts[i], &tem,
6324 gcc_assert (is_simple_use);
6325 if (dt == vect_reduction_def)
6327 reduc_def_stmt = def_stmt_info;
6333 /* To properly compute ncopies we are interested in the widest
6334 input type in case we're looking at a widening accumulation. */
6336 || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
6337 < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
6341 if (dt != vect_internal_def
6342 && dt != vect_external_def
6343 && dt != vect_constant_def
6344 && dt != vect_induction_def
6345 && !(dt == vect_nested_cycle && nested_cycle))
6348 if (dt == vect_nested_cycle)
6350 found_nested_cycle_def = true;
6351 reduc_def_stmt = def_stmt_info;
6355 if (i == 1 && code == COND_EXPR)
6357 /* Record how value of COND_EXPR is defined. */
6358 if (dt == vect_constant_def)
6361 cond_reduc_val = ops[i];
6363 if (dt == vect_induction_def
6365 && is_nonwrapping_integer_induction (def_stmt_info, loop))
6368 cond_reduc_def_stmt = def_stmt_info;
6374 vectype_in
= vectype_out
;
6376 /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
6377 directly used in stmt. */
6378 if (reduc_index
== -1)
6380 if (STMT_VINFO_REDUC_TYPE (stmt_info
) == FOLD_LEFT_REDUCTION
)
6382 if (dump_enabled_p ())
6383 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6384 "in-order reduction chain without SLP.\n");
6389 reduc_def_stmt
= STMT_VINFO_REDUC_DEF (orig_stmt_info
);
6391 reduc_def_stmt
= STMT_VINFO_REDUC_DEF (stmt_info
);
6394 if (! reduc_def_stmt
|| gimple_code (reduc_def_stmt
) != GIMPLE_PHI
)
6397 if (!(reduc_index
== -1
6398 || dts
[reduc_index
] == vect_reduction_def
6399 || dts
[reduc_index
] == vect_nested_cycle
6400 || ((dts
[reduc_index
] == vect_internal_def
6401 || dts
[reduc_index
] == vect_external_def
6402 || dts
[reduc_index
] == vect_constant_def
6403 || dts
[reduc_index
] == vect_induction_def
)
6404 && nested_cycle
&& found_nested_cycle_def
)))
6406 /* For pattern recognized stmts, orig_stmt might be a reduction,
6407 but some helper statements for the pattern might not, or
6408 might be COND_EXPRs with reduction uses in the condition. */
6409 gcc_assert (orig_stmt_info
);
6413 stmt_vec_info reduc_def_info
= vinfo_for_stmt (reduc_def_stmt
);
6414 /* PHIs should not participate in patterns. */
6415 gcc_assert (!STMT_VINFO_RELATED_STMT (reduc_def_info
));
6416 enum vect_reduction_type v_reduc_type
6417 = STMT_VINFO_REDUC_TYPE (reduc_def_info
);
6418 gimple
*tmp
= STMT_VINFO_REDUC_DEF (reduc_def_info
);
6420 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) = v_reduc_type
;
6421 /* If we have a condition reduction, see if we can simplify it further. */
6422 if (v_reduc_type
== COND_REDUCTION
)
6424 /* TODO: We can't yet handle reduction chains, since we need to treat
6425 each COND_EXPR in the chain specially, not just the last one.
6428 x_1 = PHI <x_3, ...>
6429 x_2 = a_2 ? ... : x_1;
6430 x_3 = a_3 ? ... : x_2;
6432 we're interested in the last element in x_3 for which a_2 || a_3
6433 is true, whereas the current reduction chain handling would
6434 vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
6435 as a reduction operation. */
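/* Such a chain can arise (this source form is only an illustration)
   from two guarded assignments to the same variable in one iteration,
   e.g.

       for (int i = 0; i < n; i++)
         {
           if (p2 (i)) x = v2;
           if (p3 (i)) x = v3;
         }

   where the wanted result is the value stored by the last executed
   assignment across the whole loop, not merely by the last statement
   in the chain. */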
6436 if (reduc_index
== -1)
6438 if (dump_enabled_p ())
6439 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6440 "conditional reduction chains not supported\n");
6444 /* vect_is_simple_reduction ensured that operand 2 is the
6445 loop-carried operand. */
6446 gcc_assert (reduc_index
== 2);
6448 /* Loop peeling modifies initial value of reduction PHI, which
6449 makes the reduction stmt to be transformed different to the
6450 original stmt analyzed. We need to record reduction code for
6451 CONST_COND_REDUCTION type reduction at analyzing stage, thus
6452 it can be used directly at transform stage. */
6453 if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
) == MAX_EXPR
6454 || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
) == MIN_EXPR
)
6456 /* Also set the reduction type to CONST_COND_REDUCTION. */
6457 gcc_assert (cond_reduc_dt
== vect_constant_def
);
6458 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) = CONST_COND_REDUCTION
;
6460 else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST
,
6461 vectype_in
, OPTIMIZE_FOR_SPEED
))
6463 if (dump_enabled_p ())
6464 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6465 "optimizing condition reduction with"
6466 " FOLD_EXTRACT_LAST.\n");
6467 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) = EXTRACT_LAST_REDUCTION
;
6469 else if (cond_reduc_dt
== vect_induction_def
)
6471 stmt_vec_info cond_stmt_vinfo
= vinfo_for_stmt (cond_reduc_def_stmt
);
6473 = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo
);
6474 tree step
= STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo
);
6476 gcc_assert (TREE_CODE (base
) == INTEGER_CST
6477 && TREE_CODE (step
) == INTEGER_CST
);
6478 cond_reduc_val
= NULL_TREE
;
6479 /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR
6480 above base; punt if base is the minimum value of the type for
6481 MAX_EXPR or maximum value of the type for MIN_EXPR for now. */
6482 if (tree_int_cst_sgn (step
) == -1)
6484 cond_reduc_op_code
= MIN_EXPR
;
6485 if (tree_int_cst_sgn (base
) == -1)
6486 cond_reduc_val
= build_int_cst (TREE_TYPE (base
), 0);
6487 else if (tree_int_cst_lt (base
,
6488 TYPE_MAX_VALUE (TREE_TYPE (base
))))
6490 = int_const_binop (PLUS_EXPR
, base
, integer_one_node
);
6494 cond_reduc_op_code
= MAX_EXPR
;
6495 if (tree_int_cst_sgn (base
) == 1)
6496 cond_reduc_val
= build_int_cst (TREE_TYPE (base
), 0);
6497 else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base
)),
6500 = int_const_binop (MINUS_EXPR
, base
, integer_one_node
);
6504 if (dump_enabled_p ())
6505 dump_printf_loc (MSG_NOTE
, vect_location
,
6506 "condition expression based on "
6507 "integer induction.\n");
6508 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
6509 = INTEGER_INDUC_COND_REDUCTION
;
6512 else if (cond_reduc_dt
== vect_constant_def
)
6514 enum vect_def_type cond_initial_dt
;
6515 gimple
*def_stmt
= SSA_NAME_DEF_STMT (ops
[reduc_index
]);
6516 tree cond_initial_val
6517 = PHI_ARG_DEF_FROM_EDGE (def_stmt
, loop_preheader_edge (loop
));
6519 gcc_assert (cond_reduc_val
!= NULL_TREE
);
6520 vect_is_simple_use (cond_initial_val
, loop_vinfo
, &cond_initial_dt
);
6521 if (cond_initial_dt
== vect_constant_def
6522 && types_compatible_p (TREE_TYPE (cond_initial_val
),
6523 TREE_TYPE (cond_reduc_val
)))
6525 tree e
= fold_binary (LE_EXPR
, boolean_type_node
,
6526 cond_initial_val
, cond_reduc_val
);
6527 if (e
&& (integer_onep (e
) || integer_zerop (e
)))
6529 if (dump_enabled_p ())
6530 dump_printf_loc (MSG_NOTE
, vect_location
,
6531 "condition expression based on "
6532 "compile time constant.\n");
6533 /* Record reduction code at analysis stage. */
6534 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
)
6535 = integer_onep (e
) ? MAX_EXPR
: MIN_EXPR
;
6536 STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
)
6537 = CONST_COND_REDUCTION
;
6544 gcc_assert (tmp
== orig_stmt_info
6545 || (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp
))
6546 == orig_stmt_info
));
6548 /* We changed STMT to be the first stmt in reduction chain, hence we
6549 check that in this case the first element in the chain is STMT. */
6550 gcc_assert (stmt
== tmp
6551 || REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp
)) == stmt
);
6553 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt
)))
6559 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
6561 gcc_assert (ncopies
>= 1);
6563 vec_mode
= TYPE_MODE (vectype_in
);
6564 poly_uint64 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
6566 if (code
== COND_EXPR
)
6568 /* Only call during the analysis stage, otherwise we'll lose
6570 if (!vec_stmt
&& !vectorizable_condition (stmt
, gsi
, NULL
,
6571 ops
[reduc_index
], 0, NULL
,
6574 if (dump_enabled_p ())
6575 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6576 "unsupported condition in reduction\n");
6582 /* 4. Supportable by target? */
6584 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
6585 || code
== LROTATE_EXPR
|| code
== RROTATE_EXPR
)
6587 /* Shifts and rotates are only supported by vectorizable_shifts,
6588 not vectorizable_reduction. */
6589 if (dump_enabled_p ())
6590 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6591 "unsupported shift or rotation.\n");
6595 /* 4.1. check support for the operation in the loop */
6596 optab
= optab_for_tree_code (code
, vectype_in
, optab_default
);
6599 if (dump_enabled_p ())
6600 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6606 if (optab_handler (optab
, vec_mode
) == CODE_FOR_nothing
)
6608 if (dump_enabled_p ())
6609 dump_printf (MSG_NOTE
, "op not supported by target.\n");
6611 if (maybe_ne (GET_MODE_SIZE (vec_mode
), UNITS_PER_WORD
)
6612 || !vect_worthwhile_without_simd_p (loop_vinfo
, code
))
6615 if (dump_enabled_p ())
6616 dump_printf (MSG_NOTE
, "proceeding using word mode.\n");
6619 /* Worthwhile without SIMD support? */
6620 if (!VECTOR_MODE_P (TYPE_MODE (vectype_in
))
6621 && !vect_worthwhile_without_simd_p (loop_vinfo
, code
))
6623 if (dump_enabled_p ())
6624 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6625 "not worthwhile without SIMD support.\n");
6631 /* 4.2. Check support for the epilog operation.
6633 If STMT represents a reduction pattern, then the type of the
6634 reduction variable may be different than the type of the rest
6635 of the arguments. For example, consider the case of accumulation
6636 of shorts into an int accumulator; The original code:
6637 S1: int_a = (int) short_a;
6638 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
6641 STMT: int_acc = widen_sum <short_a, int_acc>
6644 1. The tree-code that is used to create the vector operation in the
6645 epilog code (that reduces the partial results) is not the
6646 tree-code of STMT, but is rather the tree-code of the original
6647 stmt from the pattern that STMT is replacing. I.e, in the example
6648 above we want to use 'widen_sum' in the loop, but 'plus' in the
6650 2. The type (mode) we use to check available target support
6651 for the vector operation to be created in the *epilog*, is
6652 determined by the type of the reduction variable (in the example
6653 above we'd check this: optab_handler (plus_optab, vect_int_mode])).
6654 However the type (mode) we use to check available target support
6655 for the vector operation to be created *inside the loop*, is
6656 determined by the type of the other arguments to STMT (in the
6657 example we'd check this: optab_handler (widen_sum_optab,
6660 This is contrary to "regular" reductions, in which the types of all
6661 the arguments are the same as the type of the reduction variable.
6662 For "regular" reductions we can therefore use the same vector type
6663 (and also the same tree-code) when generating the epilog code and
6664 when generating the code inside the loop. */
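/* To continue the widen_sum illustration above (a sketch only): inside
   the loop we generate something along the lines of

       vec_acc = WIDEN_SUM_EXPR <vec_short, vec_acc>;

   whose support is checked on the narrower mode (V8HI), while the
   epilogue reduces the V4SI vector of partial sums with an ordinary
   plus-based reduction whose support is checked on V4SI.  That is why
   the two checks below use different tree codes and modes. */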
6666 vect_reduction_type reduction_type
6667 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
);
6669 && (reduction_type
== TREE_CODE_REDUCTION
6670 || reduction_type
== FOLD_LEFT_REDUCTION
))
6672 /* This is a reduction pattern: get the vectype from the type of the
6673 reduction variable, and get the tree-code from orig_stmt. */
6674 orig_code
= gimple_assign_rhs_code (orig_stmt_info
->stmt
);
6675 gcc_assert (vectype_out
);
6676 vec_mode
= TYPE_MODE (vectype_out
);
6680 /* Regular reduction: use the same vectype and tree-code as used for
6681 the vector code inside the loop can be used for the epilog code. */
6684 if (code
== MINUS_EXPR
)
6685 orig_code
= PLUS_EXPR
;
6687 /* For simple condition reductions, replace with the actual expression
6688 we want to base our reduction around. */
6689 if (reduction_type
== CONST_COND_REDUCTION
)
6691 orig_code
= STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info
);
6692 gcc_assert (orig_code
== MAX_EXPR
|| orig_code
== MIN_EXPR
);
6694 else if (reduction_type
== INTEGER_INDUC_COND_REDUCTION
)
6695 orig_code
= cond_reduc_op_code
;
6700 def_bb
= gimple_bb (reduc_def_stmt
);
6701 def_stmt_loop
= def_bb
->loop_father
;
6702 def_arg
= PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt
,
6703 loop_preheader_edge (def_stmt_loop
));
6704 stmt_vec_info def_arg_stmt_info
= loop_vinfo
->lookup_def (def_arg
);
6705 if (def_arg_stmt_info
6706 && (STMT_VINFO_DEF_TYPE (def_arg_stmt_info
)
6707 == vect_double_reduction_def
))
6708 double_reduc
= true;
6711 reduc_fn
= IFN_LAST
;
6713 if (reduction_type
== TREE_CODE_REDUCTION
6714 || reduction_type
== FOLD_LEFT_REDUCTION
6715 || reduction_type
== INTEGER_INDUC_COND_REDUCTION
6716 || reduction_type
== CONST_COND_REDUCTION
)
6718 if (reduction_type
== FOLD_LEFT_REDUCTION
6719 ? fold_left_reduction_fn (orig_code
, &reduc_fn
)
6720 : reduction_fn_for_scalar_code (orig_code
, &reduc_fn
))
6722 if (reduc_fn
!= IFN_LAST
6723 && !direct_internal_fn_supported_p (reduc_fn
, vectype_out
,
6724 OPTIMIZE_FOR_SPEED
))
6726 if (dump_enabled_p ())
6727 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6728 "reduc op not supported by target.\n");
6730 reduc_fn
= IFN_LAST
;
6735 if (!nested_cycle
|| double_reduc
)
6737 if (dump_enabled_p ())
6738 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6739 "no reduc code for scalar code.\n");
6745 else if (reduction_type
== COND_REDUCTION
)
6747 int scalar_precision
6748 = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type
));
6749 cr_index_scalar_type
= make_unsigned_type (scalar_precision
);
6750 cr_index_vector_type
= build_vector_type (cr_index_scalar_type
,
6753 if (direct_internal_fn_supported_p (IFN_REDUC_MAX
, cr_index_vector_type
,
6754 OPTIMIZE_FOR_SPEED
))
6755 reduc_fn
= IFN_REDUC_MAX
;
6758 if (reduction_type
!= EXTRACT_LAST_REDUCTION
6759 && reduc_fn
== IFN_LAST
6760 && !nunits_out
.is_constant ())
6762 if (dump_enabled_p ())
6763 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6764 "missing target support for reduction on"
6765 " variable-length vectors.\n");
6769 if ((double_reduc
|| reduction_type
!= TREE_CODE_REDUCTION
)
6772 if (dump_enabled_p ())
6773 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6774 "multiple types in double reduction or condition "
6779 /* For SLP reductions, see if there is a neutral value we can use. */
6780 tree neutral_op
= NULL_TREE
;
6782 neutral_op
= neutral_op_for_slp_reduction
6783 (slp_node_instance
->reduc_phis
, code
,
6784 REDUC_GROUP_FIRST_ELEMENT (stmt_info
) != NULL
);
6786 if (double_reduc
&& reduction_type
== FOLD_LEFT_REDUCTION
)
6788 /* We can't support in-order reductions of code such as this:
6790 for (int i = 0; i < n1; ++i)
6791 for (int j = 0; j < n2; ++j)
6794 since GCC effectively transforms the loop when vectorizing:
6796 for (int i = 0; i < n1 / VF; ++i)
6797 for (int j = 0; j < n2; ++j)
6798 for (int k = 0; k < VF; ++k)
6801 which is a reassociation of the original operation. */
6802 if (dump_enabled_p ())
6803 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6804 "in-order double reduction not supported.\n");
6809 if (reduction_type
== FOLD_LEFT_REDUCTION
6811 && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
6813 /* We cannot use in-order reductions in this case because there is
6814 an implicit reassociation of the operations involved. */
6815 if (dump_enabled_p ())
6816 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6817 "in-order unchained SLP reductions not supported.\n");
6821 /* For double reductions, and for SLP reductions with a neutral value,
6822 we construct a variable-length initial vector by loading a vector
6823 full of the neutral value and then shift-and-inserting the start
6824 values into the low-numbered elements. */
6825 if ((double_reduc
|| neutral_op
)
6826 && !nunits_out
.is_constant ()
6827 && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT
,
6828 vectype_out
, OPTIMIZE_FOR_SPEED
))
6830 if (dump_enabled_p ())
6831 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6832 "reduction on variable-length vectors requires"
6833 " target support for a vector-shift-and-insert"
6838 /* Check extra constraints for variable-length unchained SLP reductions. */
6839 if (STMT_SLP_TYPE (stmt_info
)
6840 && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
))
6841 && !nunits_out
.is_constant ())
6843 /* We checked above that we could build the initial vector when
6844 there's a neutral element value. Check here for the case in
6845 which each SLP statement has its own initial value and in which
6846 that value needs to be repeated for every instance of the
6847 statement within the initial vector. */
6848 unsigned int group_size
= SLP_TREE_SCALAR_STMTS (slp_node
).length ();
6849 scalar_mode elt_mode
= SCALAR_TYPE_MODE (TREE_TYPE (vectype_out
));
6851 && !can_duplicate_and_interleave_p (group_size
, elt_mode
))
6853 if (dump_enabled_p ())
6854 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6855 "unsupported form of SLP reduction for"
6856 " variable-length vectors: cannot build"
6857 " initial vector.\n");
6860 /* The epilogue code relies on the number of elements being a multiple
6861 of the group size. The duplicate-and-interleave approach to setting
6862 up the initial vector does too. */
6863 if (!multiple_p (nunits_out
, group_size
))
6865 if (dump_enabled_p ())
6866 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6867 "unsupported form of SLP reduction for"
6868 " variable-length vectors: the vector size"
6869 " is not a multiple of the number of results.\n");
6874 /* In case of widening multiplication by a constant, we update the type
6875 of the constant to be the type of the other operand. We check that the
6876 constant fits the type in the pattern recognition pass. */
6877 if (code
== DOT_PROD_EXPR
6878 && !types_compatible_p (TREE_TYPE (ops
[0]), TREE_TYPE (ops
[1])))
6880 if (TREE_CODE (ops
[0]) == INTEGER_CST
)
6881 ops
[0] = fold_convert (TREE_TYPE (ops
[1]), ops
[0]);
6882 else if (TREE_CODE (ops
[1]) == INTEGER_CST
)
6883 ops
[1] = fold_convert (TREE_TYPE (ops
[0]), ops
[1]);
6886 if (dump_enabled_p ())
6887 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6888 "invalid types in dot-prod\n");
6894 if (reduction_type
== COND_REDUCTION
)
6898 if (! max_loop_iterations (loop
, &ni
))
6900 if (dump_enabled_p ())
6901 dump_printf_loc (MSG_NOTE
, vect_location
,
6902 "loop count not known, cannot create cond "
6906 /* Convert backedges to iterations. */
6909 /* The additional index will be the same type as the condition. Check
6910 that the loop can fit into this less one (because we'll use up the
6911 zero slot for when there are no matches). */
6912 tree max_index
= TYPE_MAX_VALUE (cr_index_scalar_type
);
6913 if (wi::geu_p (ni
, wi::to_widest (max_index
)))
6915 if (dump_enabled_p ())
6916 dump_printf_loc (MSG_NOTE
, vect_location
,
6917 "loop size is greater than data size.\n");
6922 /* In case the vectorization factor (VF) is bigger than the number
6923 of elements that we can fit in a vectype (nunits), we have to generate
6924 more than one vector stmt - i.e - we need to "unroll" the
6925 vector stmt by a factor VF/nunits. For more details see documentation
6926 in vectorizable_operation. */
6928 /* If the reduction is used in an outer loop we need to generate
6929 VF intermediate results, like so (e.g. for ncopies=2):
6934 (i.e. we generate VF results in 2 registers).
6935 In this case we have a separate def-use cycle for each copy, and therefore
6936 for each copy we get the vector def for the reduction variable from the
6937 respective phi node created for this copy.
6939 Otherwise (the reduction is unused in the loop nest), we can combine
6940 together intermediate results, like so (e.g. for ncopies=2):
6944 (i.e. we generate VF/2 results in a single register).
6945 In this case for each copy we get the vector def for the reduction variable
6946 from the vectorized reduction operation generated in the previous iteration.
6948 This only works when we see both the reduction PHI and its only consumer
6949 in vectorizable_reduction and there are no intermediate stmts
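/* Sketch for ncopies == 2 (names are illustrative):

       separate def-use cycles:            single def-use cycle:
         vacc0 = PHI <vinit0, vsum0>         vacc  = PHI <vinit, vsum1>
         vacc1 = PHI <vinit1, vsum1>         vsum0 = vacc  + vx0
         vsum0 = vacc0 + vx0                 vsum1 = vsum0 + vx1
         vsum1 = vacc1 + vx1

   In the first form the epilogue has to combine two partial result
   vectors, in the second only one, matching the VF vs. VF/2
   description above. */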
6951 stmt_vec_info use_stmt_info
;
6952 tree reduc_phi_result
= gimple_phi_result (reduc_def_stmt
);
6954 && (STMT_VINFO_RELEVANT (stmt_info
) <= vect_used_only_live
)
6955 && (use_stmt_info
= loop_vinfo
->lookup_single_use (reduc_phi_result
))
6956 && (use_stmt_info
== stmt_info
6957 || STMT_VINFO_RELATED_STMT (use_stmt_info
) == stmt
))
6959 single_defuse_cycle
= true;
6963 epilog_copies
= ncopies
;
6965 /* If the reduction stmt is one of the patterns that have lane
6966 reduction embedded we cannot handle the case of ! single_defuse_cycle. */
6968 && ! single_defuse_cycle
)
6969 && (code
== DOT_PROD_EXPR
6970 || code
== WIDEN_SUM_EXPR
6971 || code
== SAD_EXPR
))
6973 if (dump_enabled_p ())
6974 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6975 "multi def-use cycle not possible for lane-reducing "
6976 "reduction operation\n");
6981 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6985 internal_fn cond_fn
= get_conditional_internal_fn (code
);
6986 vec_loop_masks
*masks
= &LOOP_VINFO_MASKS (loop_vinfo
);
6988 if (!vec_stmt
) /* transformation not required. */
6990 vect_model_reduction_cost (stmt_info
, reduc_fn
, ncopies
, cost_vec
);
6991 if (loop_vinfo
&& LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
))
6993 if (reduction_type
!= FOLD_LEFT_REDUCTION
6994 && (cond_fn
== IFN_LAST
6995 || !direct_internal_fn_supported_p (cond_fn
, vectype_in
,
6996 OPTIMIZE_FOR_SPEED
)))
6998 if (dump_enabled_p ())
6999 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7000 "can't use a fully-masked loop because no"
7001 " conditional operation is available.\n");
7002 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
) = false;
7004 else if (reduc_index
== -1)
7006 if (dump_enabled_p ())
7007 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7008 "can't use a fully-masked loop for chained"
7010 LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
) = false;
7013 vect_record_loop_mask (loop_vinfo
, masks
, ncopies
* vec_num
,
7016 if (dump_enabled_p ()
7017 && reduction_type
== FOLD_LEFT_REDUCTION
)
7018 dump_printf_loc (MSG_NOTE
, vect_location
,
7019 "using an in-order (fold-left) reduction.\n");
7020 STMT_VINFO_TYPE (stmt_info
) = reduc_vec_info_type
;
7026 if (dump_enabled_p ())
7027 dump_printf_loc (MSG_NOTE
, vect_location
, "transform reduction.\n");
7029 /* FORNOW: Multiple types are not supported for condition. */
7030 if (code
== COND_EXPR
)
7031 gcc_assert (ncopies
== 1);
7033 bool masked_loop_p
= LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
);
7035 if (reduction_type
== FOLD_LEFT_REDUCTION
)
7036 return vectorize_fold_left_reduction
7037 (stmt
, gsi
, vec_stmt
, slp_node
, reduc_def_stmt
, code
,
7038 reduc_fn
, ops
, vectype_in
, reduc_index
, masks
);
7040 if (reduction_type
== EXTRACT_LAST_REDUCTION
)
7042 gcc_assert (!slp_node
);
7043 return vectorizable_condition (stmt
, gsi
, vec_stmt
,
7044 NULL
, reduc_index
, NULL
, NULL
);
7047 /* Create the destination vector */
7048 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
7050 prev_stmt_info
= NULL
;
7051 prev_phi_info
= NULL
;
7054 vec_oprnds0
.create (1);
7055 vec_oprnds1
.create (1);
7056 if (op_type
== ternary_op
)
7057 vec_oprnds2
.create (1);
7060 phis
.create (vec_num
);
7061 vect_defs
.create (vec_num
);
7063 vect_defs
.quick_push (NULL_TREE
);
7066 phis
.splice (SLP_TREE_VEC_STMTS (slp_node_instance
->reduc_phis
));
7068 phis
.quick_push (STMT_VINFO_VEC_STMT (vinfo_for_stmt (reduc_def_stmt
)));
7070 for (j
= 0; j
< ncopies
; j
++)
7072 if (code
== COND_EXPR
)
7074 gcc_assert (!slp_node
);
7075 vectorizable_condition (stmt
, gsi
, vec_stmt
,
7076 PHI_RESULT (phis
[0]),
7077 reduc_index
, NULL
, NULL
);
7078 /* Multiple types are not supported for condition. */
7087 /* Get vec defs for all the operands except the reduction index,
7088 ensuring the ordering of the ops in the vector is kept. */
7089 auto_vec
<tree
, 3> slp_ops
;
7090 auto_vec
<vec
<tree
>, 3> vec_defs
;
7092 slp_ops
.quick_push (ops
[0]);
7093 slp_ops
.quick_push (ops
[1]);
7094 if (op_type
== ternary_op
)
7095 slp_ops
.quick_push (ops
[2]);
7097 vect_get_slp_defs (slp_ops
, slp_node
, &vec_defs
);
7099 vec_oprnds0
.safe_splice (vec_defs
[0]);
7100 vec_defs
[0].release ();
7101 vec_oprnds1
.safe_splice (vec_defs
[1]);
7102 vec_defs
[1].release ();
7103 if (op_type
== ternary_op
)
7105 vec_oprnds2
.safe_splice (vec_defs
[2]);
7106 vec_defs
[2].release ();
7111 vec_oprnds0
.quick_push
7112 (vect_get_vec_def_for_operand (ops
[0], stmt
));
7113 vec_oprnds1
.quick_push
7114 (vect_get_vec_def_for_operand (ops
[1], stmt
));
7115 if (op_type
== ternary_op
)
7116 vec_oprnds2
.quick_push
7117 (vect_get_vec_def_for_operand (ops
[2], stmt
));
7124 gcc_assert (reduc_index
!= -1 || ! single_defuse_cycle
);
7126 if (single_defuse_cycle
&& reduc_index
== 0)
7127 vec_oprnds0
[0] = gimple_get_lhs (new_stmt_info
->stmt
);
7130 = vect_get_vec_def_for_stmt_copy (dts
[0], vec_oprnds0
[0]);
7131 if (single_defuse_cycle
&& reduc_index
== 1)
7132 vec_oprnds1
[0] = gimple_get_lhs (new_stmt_info
->stmt
);
7135 = vect_get_vec_def_for_stmt_copy (dts
[1], vec_oprnds1
[0]);
7136 if (op_type
== ternary_op
)
7138 if (single_defuse_cycle
&& reduc_index
== 2)
7139 vec_oprnds2
[0] = gimple_get_lhs (new_stmt_info
->stmt
);
7142 = vect_get_vec_def_for_stmt_copy (dts
[2], vec_oprnds2
[0]);
7147 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, def0
)
7149 tree vop
[3] = { def0
, vec_oprnds1
[i
], NULL_TREE
};
7152 /* Make sure that the reduction accumulator is vop[0]. */
7153 if (reduc_index
== 1)
7155 gcc_assert (commutative_tree_code (code
));
7156 std::swap (vop
[0], vop
[1]);
7158 tree mask
= vect_get_loop_mask (gsi
, masks
, vec_num
* ncopies
,
7159 vectype_in
, i
* ncopies
+ j
);
7160 gcall
*call
= gimple_build_call_internal (cond_fn
, 4, mask
,
7163 new_temp
= make_ssa_name (vec_dest
, call
);
7164 gimple_call_set_lhs (call
, new_temp
);
7165 gimple_call_set_nothrow (call
, true);
7166 new_stmt_info
= vect_finish_stmt_generation (stmt
, call
, gsi
);
7170 if (op_type
== ternary_op
)
7171 vop
[2] = vec_oprnds2
[i
];
7173 gassign
*new_stmt
= gimple_build_assign (vec_dest
, code
,
7174 vop
[0], vop
[1], vop
[2]);
7175 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7176 gimple_assign_set_lhs (new_stmt
, new_temp
);
7178 = vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7183 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
7184 vect_defs
.quick_push (new_temp
);
7187 vect_defs
[0] = new_temp
;
7194 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
7196 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
7198 prev_stmt_info
= new_stmt_info
;
7201 /* Finalize the reduction-phi (set its arguments) and create the
7202 epilog reduction code. */
7203 if ((!single_defuse_cycle
|| code
== COND_EXPR
) && !slp_node
)
7204 vect_defs
[0] = gimple_get_lhs (*vec_stmt
);
7206 vect_create_epilog_for_reduction (vect_defs
, stmt
, reduc_def_stmt
,
7207 epilog_copies
, reduc_fn
, phis
,
7208 double_reduc
, slp_node
, slp_node_instance
,
7209 cond_reduc_val
, cond_reduc_op_code
,
7215 /* Function vect_min_worthwhile_factor.
7217 For a loop where we could vectorize the operation indicated by CODE,
7218 return the minimum vectorization factor that makes it worthwhile
7219 to use generic vectors. */
7221 vect_min_worthwhile_factor (enum tree_code code)
7241 /* Return true if VINFO indicates we are doing loop vectorization and if
7242 it is worth decomposing CODE operations into scalar operations for
7243 that loop's vectorization factor. */
7246 vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
7248 loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
7249 unsigned HOST_WIDE_INT value;
7251 && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
7252 && value >= vect_min_worthwhile_factor (code));
7255 /* Function vectorizable_induction
7257 Check if PHI performs an induction computation that can be vectorized.
7258 If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
7259 phi to replace it, put it in VEC_STMT, and add it to the same basic block.
7260 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7263 vectorizable_induction (gimple *phi,
7264 gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
7265 gimple **vec_stmt, slp_tree slp_node,
7266 stmt_vector_for_cost *cost_vec)
7268 stmt_vec_info stmt_info = vinfo_for_stmt (phi);
7269 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
7270 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
7272 bool nested_in_vect_loop = false;
7273 struct loop *iv_loop;
7275 edge pe = loop_preheader_edge (loop);
7277 tree new_vec, vec_init, vec_step, t;
7280 gphi *induction_phi;
7281 tree induc_def, vec_dest;
7282 tree init_expr, step_expr;
7283 poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
7287 imm_use_iterator imm_iter;
7288 use_operand_p use_p;
7292 gimple_stmt_iterator si;
7293 basic_block bb = gimple_bb (phi);
7295 if (gimple_code (phi) != GIMPLE_PHI)
7298 if (!STMT_VINFO_RELEVANT_P (stmt_info))
7301 /* Make sure it was recognized as induction computation. */
7302 if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
7305 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
7306 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
7311 ncopies = vect_get_num_copies (loop_vinfo, vectype);
7312 gcc_assert (ncopies >= 1);
7314 /* FORNOW. These restrictions should be relaxed. */
7315 if (nested_in_vect_loop_p (loop
, phi
))
7317 imm_use_iterator imm_iter
;
7318 use_operand_p use_p
;
7325 if (dump_enabled_p ())
7326 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7327 "multiple types in nested loop.\n");
7331 /* FORNOW: outer loop induction with SLP not supported. */
7332 if (STMT_SLP_TYPE (stmt_info
))
7336 latch_e
= loop_latch_edge (loop
->inner
);
7337 loop_arg
= PHI_ARG_DEF_FROM_EDGE (phi
, latch_e
);
7338 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, loop_arg
)
7340 gimple
*use_stmt
= USE_STMT (use_p
);
7341 if (is_gimple_debug (use_stmt
))
7344 if (!flow_bb_inside_loop_p (loop
->inner
, gimple_bb (use_stmt
)))
7346 exit_phi
= use_stmt
;
7352 stmt_vec_info exit_phi_vinfo
= loop_vinfo
->lookup_stmt (exit_phi
);
7353 if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo
)
7354 && !STMT_VINFO_LIVE_P (exit_phi_vinfo
)))
7356 if (dump_enabled_p ())
7357 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7358 "inner-loop induction only used outside "
7359 "of the outer vectorized loop.\n");
7364 nested_in_vect_loop
= true;
7365 iv_loop
= loop
->inner
;
7369 gcc_assert (iv_loop
== (gimple_bb (phi
))->loop_father
);
7371 if (slp_node
&& !nunits
.is_constant ())
7373 /* The current SLP code creates the initial value element-by-element. */
7374 if (dump_enabled_p ())
7375 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7376 "SLP induction not supported for variable-length"
7381 if (!vec_stmt
) /* transformation not required. */
7383 STMT_VINFO_TYPE (stmt_info
) = induc_vec_info_type
;
7384 DUMP_VECT_SCOPE ("vectorizable_induction");
7385 vect_model_induction_cost (stmt_info
, ncopies
, cost_vec
);
7391 /* Compute a vector variable, initialized with the first VF values of
7392 the induction variable. E.g., for an iv with IV_PHI='X' and
7393 evolution S, for a vector of 4 units, we want to compute:
7394 [X, X + S, X + 2*S, X + 3*S]. */
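/* Concretely (illustration only): with X = 0, S = 1 and four units the
   initial vector is [0, 1, 2, 3] and the step vector built further
   down is [4*S, 4*S, 4*S, 4*S] = [4, 4, 4, 4], so adding the step
   vector once per vector iteration reproduces the scalar sequence
   0, 1, 2, ... across iterations. */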
7396 if (dump_enabled_p ())
7397 dump_printf_loc (MSG_NOTE
, vect_location
, "transform induction phi.\n");
7399 latch_e
= loop_latch_edge (iv_loop
);
7400 loop_arg
= PHI_ARG_DEF_FROM_EDGE (phi
, latch_e
);
7402 step_expr
= STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info
);
7403 gcc_assert (step_expr
!= NULL_TREE
);
7405 pe
= loop_preheader_edge (iv_loop
);
7406 init_expr
= PHI_ARG_DEF_FROM_EDGE (phi
,
7407 loop_preheader_edge (iv_loop
));
7410 if (!nested_in_vect_loop
)
7412 /* Convert the initial value to the desired type. */
7413 tree new_type
= TREE_TYPE (vectype
);
7414 init_expr
= gimple_convert (&stmts
, new_type
, init_expr
);
7416 /* If we are using the loop mask to "peel" for alignment then we need
7417 to adjust the start value here. */
7418 tree skip_niters
= LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo
);
7419 if (skip_niters
!= NULL_TREE
)
7421 if (FLOAT_TYPE_P (vectype
))
7422 skip_niters
= gimple_build (&stmts
, FLOAT_EXPR
, new_type
,
7425 skip_niters
= gimple_convert (&stmts
, new_type
, skip_niters
);
7426 tree skip_step
= gimple_build (&stmts
, MULT_EXPR
, new_type
,
7427 skip_niters
, step_expr
);
7428 init_expr
= gimple_build (&stmts
, MINUS_EXPR
, new_type
,
7429 init_expr
, skip_step
);
7433 /* Convert the step to the desired type. */
7434 step_expr
= gimple_convert (&stmts
, TREE_TYPE (vectype
), step_expr
);
7438 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
7439 gcc_assert (!new_bb
);
7442 /* Find the first insertion point in the BB. */
7443 si
= gsi_after_labels (bb
);
7445 /* For SLP induction we have to generate several IVs as for example
7446 with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
7447 [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
7448 [VF*S, VF*S, VF*S, VF*S] for all. */
7451 /* Enforced above. */
7452 unsigned int const_nunits
= nunits
.to_constant ();
7454 /* Generate [VF*S, VF*S, ... ]. */
7455 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr
)))
7457 expr
= build_int_cst (integer_type_node
, vf
);
7458 expr
= fold_convert (TREE_TYPE (step_expr
), expr
);
7461 expr
= build_int_cst (TREE_TYPE (step_expr
), vf
);
7462 new_name
= fold_build2 (MULT_EXPR
, TREE_TYPE (step_expr
),
7464 if (! CONSTANT_CLASS_P (new_name
))
7465 new_name
= vect_init_vector (phi
, new_name
,
7466 TREE_TYPE (step_expr
), NULL
);
7467 new_vec
= build_vector_from_val (vectype
, new_name
);
7468 vec_step
= vect_init_vector (phi
, new_vec
, vectype
, NULL
);
7470 /* Now generate the IVs. */
7471 unsigned group_size
= SLP_TREE_SCALAR_STMTS (slp_node
).length ();
7472 unsigned nvects
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7473 unsigned elts
= const_nunits
* nvects
;
7474 unsigned nivs
= least_common_multiple (group_size
,
7475 const_nunits
) / const_nunits
;
7476 gcc_assert (elts
% group_size
== 0);
7477 tree elt
= init_expr
;
7479 for (ivn
= 0; ivn
< nivs
; ++ivn
)
7481 tree_vector_builder
elts (vectype
, const_nunits
, 1);
7483 for (unsigned eltn
= 0; eltn
< const_nunits
; ++eltn
)
7485 if (ivn
*const_nunits
+ eltn
>= group_size
7486 && (ivn
* const_nunits
+ eltn
) % group_size
== 0)
7487 elt
= gimple_build (&stmts
, PLUS_EXPR
, TREE_TYPE (elt
),
7489 elts
.quick_push (elt
);
7491 vec_init
= gimple_build_vector (&stmts
, &elts
);
7494 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
7495 gcc_assert (!new_bb
);
7498 /* Create the induction-phi that defines the induction-operand. */
7499 vec_dest
= vect_get_new_vect_var (vectype
, vect_simple_var
, "vec_iv_");
7500 induction_phi
= create_phi_node (vec_dest
, iv_loop
->header
);
7501 loop_vinfo
->add_stmt (induction_phi
);
7502 induc_def
= PHI_RESULT (induction_phi
);
7504 /* Create the iv update inside the loop */
7505 vec_def
= make_ssa_name (vec_dest
);
7506 new_stmt
= gimple_build_assign (vec_def
, PLUS_EXPR
, induc_def
, vec_step
);
7507 gsi_insert_before (&si
, new_stmt
, GSI_SAME_STMT
);
7508 loop_vinfo
->add_stmt (new_stmt
);
7510 /* Set the arguments of the phi node: */
7511 add_phi_arg (induction_phi
, vec_init
, pe
, UNKNOWN_LOCATION
);
7512 add_phi_arg (induction_phi
, vec_def
, loop_latch_edge (iv_loop
),
7515 SLP_TREE_VEC_STMTS (slp_node
).quick_push (induction_phi
);
7518 /* Re-use IVs when we can. */
7522 = least_common_multiple (group_size
, const_nunits
) / group_size
;
7523 /* Generate [VF'*S, VF'*S, ... ]. */
7524 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr
)))
7526 expr
= build_int_cst (integer_type_node
, vfp
);
7527 expr
= fold_convert (TREE_TYPE (step_expr
), expr
);
7530 expr
= build_int_cst (TREE_TYPE (step_expr
), vfp
);
7531 new_name
= fold_build2 (MULT_EXPR
, TREE_TYPE (step_expr
),
7533 if (! CONSTANT_CLASS_P (new_name
))
7534 new_name
= vect_init_vector (phi
, new_name
,
7535 TREE_TYPE (step_expr
), NULL
);
7536 new_vec
= build_vector_from_val (vectype
, new_name
);
7537 vec_step
= vect_init_vector (phi
, new_vec
, vectype
, NULL
);
7538 for (; ivn
< nvects
; ++ivn
)
7540 gimple
*iv
= SLP_TREE_VEC_STMTS (slp_node
)[ivn
- nivs
];
7542 if (gimple_code (iv
) == GIMPLE_PHI
)
7543 def
= gimple_phi_result (iv
);
7545 def
= gimple_assign_lhs (iv
);
7546 new_stmt
= gimple_build_assign (make_ssa_name (vectype
),
7549 if (gimple_code (iv
) == GIMPLE_PHI
)
7550 gsi_insert_before (&si
, new_stmt
, GSI_SAME_STMT
);
7553 gimple_stmt_iterator tgsi
= gsi_for_stmt (iv
);
7554 gsi_insert_after (&tgsi
, new_stmt
, GSI_CONTINUE_LINKING
);
7556 loop_vinfo
->add_stmt (new_stmt
);
7557 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7564 /* Create the vector that holds the initial_value of the induction. */
7565 if (nested_in_vect_loop
)
7567 /* iv_loop is nested in the loop to be vectorized. init_expr had already
7568 been created during vectorization of previous stmts. We obtain it
7569 from the STMT_VINFO_VEC_STMT of the defining stmt. */
7570 vec_init
= vect_get_vec_def_for_operand (init_expr
, phi
);
7571 /* If the initial value is not of proper type, convert it. */
7572 if (!useless_type_conversion_p (vectype
, TREE_TYPE (vec_init
)))
7575 = gimple_build_assign (vect_get_new_ssa_name (vectype
,
7579 build1 (VIEW_CONVERT_EXPR
, vectype
,
7581 vec_init
= gimple_assign_lhs (new_stmt
);
7582 new_bb
= gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop
),
7584 gcc_assert (!new_bb
);
7585 loop_vinfo
->add_stmt (new_stmt
);
      /* iv_loop is the loop to be vectorized. Create:
	 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      stmts = NULL;
      new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);

      unsigned HOST_WIDE_INT const_nunits;
      if (nunits.is_constant (&const_nunits))
	{
	  tree_vector_builder elts (vectype, const_nunits, 1);
	  elts.quick_push (new_name);
	  for (i = 1; i < const_nunits; i++)
	    {
	      /* Create: new_name_i = new_name + step_expr  */
	      new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
				       new_name, step_expr);
	      elts.quick_push (new_name);
	    }
	  /* Create a vector from [new_name_0, new_name_1, ...,
	     new_name_nunits-1]  */
	  vec_init = gimple_build_vector (&stmts, &elts);
	}
      else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
	/* Build the initial value directly from a VEC_SERIES_EXPR.  */
	vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
				 new_name, step_expr);
      else
	{
	  /* Build:
	       [base, base, base, ...]
	       + (vectype) [0, 1, 2, ...] * [step, step, step, ...].  */
	  gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
	  gcc_assert (flag_associative_math);
	  tree index = build_index_vector (vectype, 0, 1);
	  tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
							new_name);
	  tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
							step_expr);
	  vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
	  vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
				   vec_init, step_vec);
	  vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
				   vec_init, base_vec);
	}

      if (stmts)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}
    }
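  /* As a concrete illustration of the code above (not from the sources):
     with init_expr X = 0, step S = 3 and four lanes, vec_init becomes
     { 0, 3, 6, 9 }, matching the [X, X+S, X+2*S, X+3*S] form described in
     the comment.  */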
  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized. Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized. Generate:
	 vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      gimple_seq seq = NULL;
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, vf);
	  expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
			       expr, step_expr);
      if (seq)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
	      || TREE_CODE (new_name) == SSA_NAME);
  new_vec = build_vector_from_val (vectype, t);
  vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
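  /* Continuing the illustration: with VF = 4 and S = 3, vec_step is
     { 12, 12, 12, 12 }, so adding it to { 0, 3, 6, 9 } once per vector
     iteration reproduces the scalar induction values 12,15,18,21, then
     24,27,30,33, and so on.  */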
  /* Create the following def-use cycle:
     loop prolog:
	 vec_init = ...
	 vec_step = ...
     loop:
	 vec_iv = PHI <vec_init, vec_loop>
	 ...
	 STMT
	 ...
	 vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  stmt_vec_info induction_phi_info = loop_vinfo->add_stmt (induction_phi);
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  vec_def = make_ssa_name (vec_dest);
  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  stmt_vec_info new_stmt_info = loop_vinfo->add_stmt (new_stmt);

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
	       UNKNOWN_LOCATION);

  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi;
  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      gimple_seq seq = NULL;
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, nunits);
	  expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
			       expr, step_expr);
      if (seq)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
		  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (vectype, t);
      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = induction_phi_info;
      for (i = 1; i < ncopies; i++)
	{
	  /* vec_i = vec_prev + vec_step  */
	  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
					  vec_def, vec_step);
	  vec_def = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, vec_def);

	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  new_stmt_info = loop_vinfo->add_stmt (new_stmt);
	  STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt_info;
	  prev_stmt_vinfo = new_stmt_info;
	}
    }
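  /* Example of the unrolling above (illustrative values): with nunits = 4
     and ncopies = 2, the first copy is the PHI result
     { i, i+S, i+2*S, i+3*S } and the second copy adds { 4*S, 4*S, 4*S, 4*S },
     giving { i+4*S, ..., i+7*S }.  */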
  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
	 the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (exit_phi);
	  /* FORNOW. Currently not supporting the case that an inner-loop induction
	     is not used in the outer-loop (i.e. only outside the outer-loop).  */
	  gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
		      && !STMT_VINFO_LIVE_P (stmt_vinfo));

	  STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "vector of inductions after inner-loop:");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
	    }
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
			SSA_NAME_DEF_STMT (vec_def), 0);
    }

  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple *stmt,
			     gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			     slp_tree slp_node, int slp_index,
			     gimple **vec_stmt,
			     stmt_vector_for_cost *)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;
  tree lhs, lhs_type, bitsize, vec_bitsize;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  gimple *use_stmt;
  auto_vec<tree> vec_oprnds;
  int vec_entry = 0;
  poly_uint64 vec_index = 0;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  /* FORNOW.  CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  /* If STMT is not relevant and it is a simple assignment and its inputs are
     invariant then it can remain in place, unvectorized.  The original last
     scalar value that it computes will be used.  */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "statement is simple and uses invariant.  Leaving in "
			 "place.\n");
      return true;
    }

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  if (slp_node)
    {
      gcc_assert (slp_index >= 0);

      int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

      /* Get the last occurrence of the scalar index from the concatenation of
	 all the slp vectors.  Calculate which slp vector it is and the index
	 within.  */
      poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;

      /* Calculate which vector contains the result, and which lane of
	 that vector we need.  */
      if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Cannot determine which vector holds the"
			     " final result.\n");
	  return false;
	}
    }
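  /* For instance, with two SLP vectors of four lanes covering six scalar
     stmts, the live stmt with slp_index 1 sits at position 2*4 - 6 + 1 = 3,
     i.e. vec_entry 0 and vec_index 3 (an illustration of the computation
     above, not a real testcase).  */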
  if (!vec_stmt)
    {
      /* No transformation required.  */
      if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
	{
	  if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
					       OPTIMIZE_FOR_SPEED))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because "
				 "the target doesn't support extract last "
				 "reduction.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else if (slp_node)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because an "
				 "SLP statement is live after the loop.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else if (ncopies > 1)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "can't use a fully-masked loop because"
				 " ncopies is greater than 1.\n");
	      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	    }
	  else
	    {
	      gcc_assert (ncopies == 1 && !slp_node);
	      vect_record_loop_mask (loop_vinfo,
				     &LOOP_VINFO_MASKS (loop_vinfo),
				     1, vectype);
	    }
	}
      return true;
    }
  /* If stmt has a related stmt, then use that for getting the lhs.  */
  if (is_pattern_stmt_p (stmt_info))
    stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
	: gimple_get_lhs (stmt);
  lhs_type = TREE_TYPE (lhs);

  bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
	     ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
	     : TYPE_SIZE (TREE_TYPE (vectype)));
  vec_bitsize = TYPE_SIZE (vectype);
  /* Get the vectorized lhs of STMT and the lane to use (counted in bits).  */
  tree vec_lhs, bitstart;
  if (slp_node)
    {
      gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* Get the correct slp vectorized stmt.  */
      gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry];
      if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
	vec_lhs = gimple_phi_result (phi);
      else
	vec_lhs = gimple_get_lhs (vec_stmt);

      /* Get entry to use.  */
      bitstart = bitsize_int (vec_index);
      bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
    }
  else
    {
      enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
      vec_lhs = vect_get_vec_def_for_operand_1 (stmt_info, dt);
      gcc_checking_assert (ncopies == 1
			   || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));

      /* For multiple copies, get the last copy.  */
      for (int i = 1; i < ncopies; ++i)
	vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
						  vec_lhs);

      /* Get the last lane in the vector.  */
      bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
    }
  gimple_seq stmts = NULL;
  tree new_tree;
  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* Emit:

	   SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>

	 where VEC_LHS is the vectorized live-out result and MASK is
	 the loop mask for the final iteration.  */
      gcc_assert (ncopies == 1 && !slp_node);
      tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
      tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
				      1, vectype, 0);
      tree scalar_res = gimple_build (&stmts, CFN_EXTRACT_LAST,
				      scalar_type, mask, vec_lhs);

      /* Convert the extracted vector element to the required scalar type.  */
      new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
    }
  else
    {
      tree bftype = TREE_TYPE (vectype);
      if (VECTOR_BOOLEAN_TYPE_P (vectype))
	bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
      new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
      new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
				       &stmts, true, NULL_TREE);
    }
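  /* Illustration of the non-masked path above: for a non-SLP live value of
     type V8HI the lane size is 16 bits and vec_bitsize is 128, so bitstart
     is 128 - 16 = 112 and the scalar result is extracted as
     BIT_FIELD_REF <vec_lhs, 16, 112>, i.e. the last lane (the SLP case uses
     vec_index * 16 instead).  */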
  if (stmts)
    gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);

  /* Replace use of lhs with newly computed result.  If the use stmt is a
     single arg PHI, just replace all uses of PHI result.  It's necessary
     because lcssa PHI defining lhs may be before newly inserted stmt.  */
  use_operand_p use_p;
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
    if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
	&& !is_gimple_debug (use_stmt))
      {
	if (gimple_code (use_stmt) == GIMPLE_PHI
	    && gimple_phi_num_args (use_stmt) == 1)
	  {
	    replace_uses_by (gimple_phi_result (use_stmt), new_tree);
	  }
	else
	  {
	    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	      SET_USE (use_p, new_tree);
	  }
	update_stmt (use_stmt);
      }

  return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple *ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb;

	  if (!is_gimple_debug (ustmt))
	    continue;

	  bb = gimple_bb (ustmt);

	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (gimple_debug_bind_p (ustmt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "killing debug use\n");

		  gimple_debug_bind_reset_value (ustmt);
		  update_stmt (ustmt);
		}
	    }
	}
    }
}
/* Given loop represented by LOOP_VINFO, return true if computation of
   LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
   otherwise.  */

static bool
loop_niters_no_overflow (loop_vec_info loop_vinfo)
{
  /* Constant case.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
      tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);

      gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
      gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
      if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
	return true;
    }
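  /* E.g. a loop whose latch runs 0xffffffff times with a 32-bit niters type
     has NITERSM1 == 0xffffffff while NITERS wraps around to 0, so the
     comparison above fails and we fall through to the range check below.  */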
  widest_int max;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  /* Check the upper bound of loop niters.  */
  if (get_max_loop_iterations (loop, &max))
    {
      tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
      signop sgn = TYPE_SIGN (type);
      widest_int type_max = widest_int::from (wi::max_value (type), sgn);
      if (max < type_max)
	return true;
    }
  return false;
}
/* Return a mask type with half the number of elements as TYPE.  */

tree
vect_halve_mask_nunits (tree type)
{
  poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
  return build_truth_vector_type (nunits, current_vector_size);
}

/* Return a mask type with twice as many elements as TYPE.  */

tree
vect_double_mask_nunits (tree type)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
  return build_truth_vector_type (nunits, current_vector_size);
}
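/* For example, halving a mask type with 8 boolean elements gives one with
   4 elements and doubling it gives one with 16, both built for the current
   vector size.  */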
/* Record that a fully-masked version of LOOP_VINFO would need MASKS to
   contain a sequence of NVECTORS masks that each control a vector of type
   VECTYPE.  */

void
vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
		       unsigned int nvectors, tree vectype)
{
  gcc_assert (nvectors != 0);
  if (masks->length () < nvectors)
    masks->safe_grow_cleared (nvectors);
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  /* The number of scalars per iteration and the number of vectors are
     both compile-time constants.  */
  unsigned int nscalars_per_iter
    = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
		 LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
  if (rgm->max_nscalars_per_iter < nscalars_per_iter)
    {
      rgm->max_nscalars_per_iter = nscalars_per_iter;
      rgm->mask_type = build_same_sized_truth_vector_type (vectype);
    }
}
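/* As an illustration of the computation above: an rgroup of 2 mask vectors
   of 8 elements in a loop with vectorization factor 8 controls
   2 * 8 / 8 = 2 scalars per scalar iteration, so max_nscalars_per_iter for
   that rgroup is at least 2.  */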
/* Given a complete set of masks MASKS, extract mask number INDEX
   for an rgroup that operates on NVECTORS vectors of type VECTYPE,
   where 0 <= INDEX < NVECTORS.  Insert any set-up statements before GSI.

   See the comment above vec_loop_masks for more details about the mask
   arrangement.  */

tree
vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
		    unsigned int nvectors, tree vectype, unsigned int index)
{
  rgroup_masks *rgm = &(*masks)[nvectors - 1];
  tree mask_type = rgm->mask_type;

  /* Populate the rgroup's mask array, if this is the first time we've
     used it.  */
  if (rgm->masks.is_empty ())
    {
      rgm->masks.safe_grow_cleared (nvectors);
      for (unsigned int i = 0; i < nvectors; ++i)
	{
	  tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
	  /* Provide a dummy definition until the real one is available.  */
	  SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
	  rgm->masks[i] = mask;
	}
    }

  tree mask = rgm->masks[index];
  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
		TYPE_VECTOR_SUBPARTS (vectype)))
    {
      /* A loop mask for data type X can be reused for data type Y
	 if X has N times more elements than Y and if Y's elements
	 are N times bigger than X's.  In this case each sequence
	 of N elements in the loop mask will be all-zero or all-one.
	 We can then view-convert the mask so that each sequence of
	 N elements is replaced by a single element.  */
      gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
			      TYPE_VECTOR_SUBPARTS (vectype)));
      gimple_seq seq = NULL;
      mask_type = build_same_sized_truth_vector_type (vectype);
      mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
      if (seq)
	gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
    }
  return mask;
}
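/* For instance, a loop mask created for 8 halfword elements can be reused
   for 4 word elements: each pair of mask elements is known to be all-zero
   or all-one, so the VIEW_CONVERT_EXPR above folds every pair into the
   single element required by the narrower mask type.  */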
/* Scale profiling counters by estimation for LOOP which is vectorized
   by factor VF.  */

static void
scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
{
  edge preheader = loop_preheader_edge (loop);
  /* Reduce loop iterations by the vectorization factor.  */
  gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
  profile_count freq_h = loop->header->count, freq_e = preheader->count ();

  if (freq_h.nonzero_p ())
    {
      profile_probability p;

      /* Avoid dropping loop body profile counter to 0 because of zero count
	 in loop's preheader.  */
      if (!(freq_e == profile_count::zero ()))
	freq_e = freq_e.force_nonzero ();
      p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
      scale_loop_frequencies (loop, p);
    }

  edge exit_e = single_exit (loop);
  exit_e->probability = profile_probability::always ()
			 .apply_scale (1, new_est_niter + 1);

  edge exit_l = single_pred_edge (loop->latch);
  profile_probability prob = exit_l->probability;
  exit_l->probability = exit_e->probability.invert ();
  if (prob.initialized_p () && exit_l->probability.initialized_p ())
    scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
}
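/* For example, if the estimated iteration count of the vector loop is 24,
   the exit edge gets probability 1/(24 + 1) = 4% and the latch edge the
   complementary 96%.  */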
/* Vectorize STMT if relevant, inserting any new instructions before GSI.
   When vectorizing STMT as a store, set *SEEN_STORE to its stmt_vec_info.
   *SLP_SCHEDULE is a running record of whether we have called
   vect_schedule_slp.  */

static void
vect_transform_loop_stmt (loop_vec_info loop_vinfo, gimple *stmt,
			  gimple_stmt_iterator *gsi,
			  stmt_vec_info *seen_store, bool *slp_scheduled)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
  if (!stmt_info)
    return;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "------>vectorizing statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
    vect_loop_kill_debug_uses (loop, stmt);

  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    return;

  if (STMT_VINFO_VECTYPE (stmt_info))
    {
      poly_uint64 nunits
	= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
      if (!STMT_SLP_TYPE (stmt_info)
	  && maybe_ne (nunits, vf)
	  && dump_enabled_p ())
	/* For SLP VF is set according to unrolling factor, and not
	   to vector size, hence for SLP this print is not valid.  */
	dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
    }

  /* SLP.  Schedule all the SLP instances when the first SLP stmt is
     reached.  */
  if (slp_vect_type slptype = STMT_SLP_TYPE (stmt_info))
    {

      if (!*slp_scheduled)
	{
	  *slp_scheduled = true;

	  DUMP_VECT_SCOPE ("scheduling SLP instances");

	  vect_schedule_slp (loop_vinfo);
	}

      /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
      if (slptype == pure_slp)
	return;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

  bool grouped_store = false;
  if (vect_transform_stmt (stmt, gsi, &grouped_store, NULL, NULL))
    *seen_store = stmt_info;
}
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - created vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.
   Returns scalar epilogue loop if any.  */

struct loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct loop *epilogue = NULL;
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  tree niters_vector = NULL_TREE;
  tree step_vector = NULL_TREE;
  tree niters_vector_mult_vf = NULL_TREE;
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int lowest_vf = constant_lower_bound (vf);
  bool slp_scheduled = false;
  gimple *stmt;
  bool check_profitability = false;
  unsigned int th;

  DUMP_VECT_SCOPE ("vec_transform_loop");

  loop_vinfo->shared->check_datarefs ();

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the (estimated) vectorization factor number of times
     checking is pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= vect_vf_for_cost (loop_vinfo)
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Profitability threshold is %d loop iterations.\n",
			 th);
      check_profitability = true;
    }

  /* Make sure there exists a single-predecessor exit bb.  Do this before
     versioning.  */
  edge e = single_exit (loop);
  if (! single_pred_p (e->dest))
    {
      split_loop_exit_edge (e);
      if (dump_enabled_p ())
	dump_printf (MSG_NOTE, "split exit edge\n");
    }

  /* Version the loop first, if required, so the profitability check
     comes first.  */

  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      poly_uint64 versioning_threshold
	= LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
      if (check_profitability
	  && ordered_p (poly_uint64 (th), versioning_threshold))
	{
	  versioning_threshold = ordered_max (poly_uint64 (th),
					      versioning_threshold);
	  check_profitability = false;
	}
      vect_loop_versioning (loop_vinfo, th, check_profitability,
			    versioning_threshold);
      check_profitability = false;
    }

  /* Make sure there exists a single-predecessor exit bb also on the
     scalar loop copy.  Do this after versioning but before peeling
     so CFG structure is fine for both scalar and if-converted loop
     to make slpeel_duplicate_current_defs_from_edges face matched
     loop closed PHI nodes on the exit.  */
  if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
    {
      e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
      if (! single_pred_p (e->dest))
	{
	  split_loop_exit_edge (e);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
	}
    }

  tree niters = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
  tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
  bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
  epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
			      &step_vector, &niters_vector_mult_vf, th,
			      check_profitability, niters_no_overflow);

  if (niters_vector == NULL_TREE)
    {
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	  && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
	  && known_eq (lowest_vf, vf))
	{
	  niters_vector
	    = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
			     LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
	  step_vector = build_one_cst (TREE_TYPE (niters));
	}
      else
	vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
				     &step_vector, niters_no_overflow);
    }
  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
      && vect_use_loop_mask_for_alignment_p (loop_vinfo))
    /* This will deal with any possible peeling.  */
    vect_prepare_for_masked_peels (loop_vinfo);

  /* FORNOW: the vectorizer supports only loops which body consist
     of one basic block (header + empty latch). When the vectorizer will
     support more involved loop forms, the order by which the BBs are
     traversed need to be reconsidered.  */

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "------>vectorizing phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }
	  stmt_info = loop_vinfo->lookup_stmt (phi);
	  if (!stmt_info)
	    continue;

	  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, phi);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    continue;

	  if (STMT_VINFO_VECTYPE (stmt_info)
	      && (maybe_ne
		  (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
	      && dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

	  if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
	      && ! PURE_SLP_STMT (stmt_info))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
	      vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
	    }
	}

      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si);)
	{
	  stmt = gsi_stmt (si);
	  /* During vectorization remove existing clobber stmts.  */
	  if (gimple_clobber_p (stmt))
	    {
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&si, true);
	      release_defs (stmt);
	    }
	  else
	    {
	      stmt_info = loop_vinfo->lookup_stmt (stmt);

	      /* vector stmts created in the outer-loop during vectorization of
		 stmts in an inner-loop may not have a stmt_info, and do not
		 need to be vectorized.  */
	      stmt_vec_info seen_store = NULL;
	      if (stmt_info)
		{
		  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
		    {
		      gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
		      for (gimple_stmt_iterator subsi = gsi_start (def_seq);
			   !gsi_end_p (subsi); gsi_next (&subsi))
			vect_transform_loop_stmt (loop_vinfo,
						  gsi_stmt (subsi), &si,
						  &seen_store,
						  &slp_scheduled);
		      gimple *pat_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
		      vect_transform_loop_stmt (loop_vinfo, pat_stmt, &si,
						&seen_store, &slp_scheduled);
		    }
		  vect_transform_loop_stmt (loop_vinfo, stmt, &si,
					    &seen_store, &slp_scheduled);
		}
	      if (seen_store)
		{
		  if (STMT_VINFO_GROUPED_ACCESS (seen_store))
		    {
		      /* Interleaving.  If IS_STORE is TRUE, the
			 vectorization of the interleaving chain was
			 completed - free all the stores in the chain.  */
		      gsi_next (&si);
		      vect_remove_stores (DR_GROUP_FIRST_ELEMENT (seen_store));
		    }
		  else
		    {
		      /* Free the attached stmt_vec_info and remove the
			 stmt.  */
		      free_stmt_vec_info (stmt);
		      unlink_stmt_vdef (stmt);
		      gsi_remove (&si, true);
		      release_defs (stmt);
		    }
		}
	      else
		gsi_next (&si);
	    }
	}
      /* Stub out scalar statements that must not survive vectorization.
	 Doing this here helps with grouped statements, or statements that
	 are involved in patterns.  */
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	   !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
	  if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
	    {
	      tree lhs = gimple_get_lhs (call);
	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  tree zero = build_zero_cst (TREE_TYPE (lhs));
		  gimple *new_stmt = gimple_build_assign (lhs, zero);
		  gsi_replace (&gsi, new_stmt, true);
		}
	    }
	}
    }
  /* The vectorization factor is always > 1, so if we use an IV increment
     of 1, a zero NITERS becomes a nonzero NITERS_VECTOR.  */
  if (integer_onep (step_vector))
    niters_no_overflow = true;
  vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
			   niters_vector_mult_vf, !niters_no_overflow);

  unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
  scale_profile_for_vect_loop (loop, assumed_vf);

  /* True if the final iteration might not handle a full vector's
     worth of scalar iterations.  */
  bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
  /* The minimum number of iterations performed by the epilogue.  This
     is 1 when peeling for gaps because we always need a final scalar
     iteration.  */
  int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
  /* +1 to convert latch counts to loop iteration counts,
     -min_epilogue_iters to remove iterations that cannot be performed
     by the vector code.  */
  int bias_for_lowest = 1 - min_epilogue_iters;
  int bias_for_assumed = bias_for_lowest;
  int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    {
      /* When the amount of peeling is known at compile time, the first
	 iteration will have exactly alignment_npeels active elements.
	 In the worst case it will have at least one.  */
      int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
      bias_for_lowest += lowest_vf - min_first_active;
      bias_for_assumed += assumed_vf - min_first_active;
    }
  /* In these calculations the "- 1" converts loop iteration counts
     back to latch counts.  */
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
			  lowest_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
			   lowest_vf) - 1);
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
			  + bias_for_lowest, lowest_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound
			   + bias_for_lowest, lowest_vf) - 1);
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = (final_iter_may_be_partial
	 ? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
			  assumed_vf) - 1
	 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
			   assumed_vf) - 1);
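  /* For example, with no peeling for gaps and lowest_vf == 4, a scalar
     latch bound of 99 (100 iterations) becomes
     udiv_floor (99 + 1, 4) - 1 = 24, i.e. the vector loop's latch is known
     to run at most 24 times.  */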
  if (dump_enabled_p ())
    {
      if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "LOOP VECTORIZED\n");
	  if (loop->inner)
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "OUTER LOOP VECTORIZED\n");
	  dump_printf (MSG_NOTE, "\n");
	}
      else
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "LOOP EPILOGUE VECTORIZED (VS=");
	  dump_dec (MSG_NOTE, current_vector_size);
	  dump_printf (MSG_NOTE, ")\n");
	}
    }

  /* Free SLP instances here because otherwise stmt reference counting
     won't work.  */
  slp_instance instance;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    vect_free_slp_instance (instance, true);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Clear-up safelen field since its value is invalid after vectorization
     since vectorized loop can have loop-carried dependencies.  */
  loop->safelen = 0;
  /* Don't vectorize epilogue for epilogue.  */
  if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    epilogue = NULL;

  if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
    epilogue = NULL;

  if (epilogue)
    {
      auto_vector_sizes vector_sizes;
      targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
      unsigned int next_size = 0;

      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	  && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
	  && known_eq (vf, lowest_vf))
	{
	  unsigned int eiters
	    = (LOOP_VINFO_INT_NITERS (loop_vinfo)
	       - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
	  eiters = eiters % lowest_vf;
	  epilogue->nb_iterations_upper_bound = eiters - 1;

	  unsigned int ratio;
	  while (next_size < vector_sizes.length ()
		 && !(constant_multiple_p (current_vector_size,
					   vector_sizes[next_size], &ratio)
		      && eiters >= lowest_vf / ratio))
	    next_size += 1;
	}
      else
	while (next_size < vector_sizes.length ()
	       && maybe_lt (current_vector_size, vector_sizes[next_size]))
	  next_size += 1;

      if (next_size == vector_sizes.length ())
	epilogue = NULL;
    }

  if (epilogue)
    {
      epilogue->force_vectorize = loop->force_vectorize;
      epilogue->safelen = loop->safelen;
      epilogue->dont_vectorize = false;

      /* We may need to if-convert epilogue to vectorize it.  */
      if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
	tree_if_conversion (epilogue);
    }

  return epilogue;
}
/* The code below is trying to perform simple optimization - revert
   if-conversion for masked stores, i.e. if the mask of a store is zero
   do not perform it and all stored value producers also if possible.
   For example,
     for (i=0; i<n; i++)
       if (c[i])
	{
	  p1[i] += 1;
	  p2[i] = p3[i] +2;
	}
   this transformation will produce the following semi-hammock:

   if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
     {
       vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
       vect__12.22_172 = vect__11.19_170 + vect_cst__171;
       MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
       vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
       vect__19.28_184 = vect__18.25_182 + vect_cst__183;
       MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
     }
*/

void
optimize_mask_stores (struct loop *loop)
{
  basic_block *bbs = get_loop_body (loop);
  unsigned nbbs = loop->num_nodes;
  unsigned i;
  basic_block bb;
  struct loop *bb_loop;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  auto_vec<gimple *> worklist;

  vect_location = find_loop_location (loop);
  /* Pick up all masked stores in loop if any.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  stmt = gsi_stmt (gsi);
	  if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	    worklist.safe_push (stmt);
	}
    }

  free (bbs);
  if (worklist.is_empty ())
    return;
  /* Loop has masked stores.  */
  while (!worklist.is_empty ())
    {
      gimple *last, *last_store;
      edge e, efalse;
      tree mask;
      basic_block store_bb, join_bb;
      gimple_stmt_iterator gsi_to;
      tree vdef, new_vdef;
      gphi *phi;
      tree vectype;
      tree zero;

      last = worklist.pop ();
      mask = gimple_call_arg (last, 2);
      bb = gimple_bb (last);
      /* Create then_bb and if-then structure in CFG, then_bb belongs to
	 the same loop as if_bb.  It could be different to LOOP when two
	 level loop-nest is vectorized and mask_store belongs to the inner
	 one.  */
      e = split_block (bb, last);
      bb_loop = bb->loop_father;
      gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
      join_bb = e->dest;
      store_bb = create_empty_bb (bb);
      add_bb_to_loop (store_bb, bb_loop);
      e->flags = EDGE_TRUE_VALUE;
      efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
      /* Put STORE_BB to likely part.  */
      efalse->probability = profile_probability::unlikely ();
      store_bb->count = efalse->count ();
      make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
      if (dom_info_available_p (CDI_DOMINATORS))
	set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Create new block %d to sink mask stores.",
			 store_bb->index);
      /* Create vector comparison with boolean result.  */
      vectype = TREE_TYPE (mask);
      zero = build_zero_cst (vectype);
      stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (bb);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      /* Create new PHI node for vdef of the last masked store:
	 .MEM_2 = VDEF <.MEM_1>
	 will be converted to
	 .MEM.3 = VDEF <.MEM_1>
	 and new PHI node will be created in join bb
	 .MEM_2 = PHI <.MEM_1, .MEM_3>
	 */
      vdef = gimple_vdef (last);
      new_vdef = make_ssa_name (gimple_vop (cfun), last);
      gimple_set_vdef (last, new_vdef);
      phi = create_phi_node (vdef, join_bb);
      add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
      /* Put all masked stores with the same mask to STORE_BB if possible.  */
      while (true)
	{
	  gimple_stmt_iterator gsi_from;
	  gimple *stmt1 = NULL;

	  /* Move masked store to STORE_BB.  */
	  last_store = last;
	  gsi = gsi_for_stmt (last);
	  gsi_from = gsi;
	  /* Shift GSI to the previous stmt for further traversal.  */
	  gsi_prev (&gsi);
	  gsi_to = gsi_start_bb (store_bb);
	  gsi_move_before (&gsi_from, &gsi_to);
	  /* Setup GSI_TO to the non-empty block start.  */
	  gsi_to = gsi_start_bb (store_bb);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "Move stmt to created bb\n");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
	    }
	  /* Move all stored value producers if possible.  */
	  while (!gsi_end_p (gsi))
	    {
	      tree lhs;
	      imm_use_iterator imm_iter;
	      use_operand_p use_p;
	      bool res;

	      /* Skip debug statements.  */
	      if (is_gimple_debug (gsi_stmt (gsi)))
		{
		  gsi_prev (&gsi);
		  continue;
		}
	      stmt1 = gsi_stmt (gsi);
	      /* Do not consider statements writing to memory or having
		 volatile operand.  */
	      if (gimple_vdef (stmt1)
		  || gimple_has_volatile_ops (stmt1))
		break;
	      gsi_from = gsi;
	      gsi_prev (&gsi);
	      lhs = gimple_get_lhs (stmt1);
	      if (!lhs)
		break;

	      /* LHS of vectorized stmt must be SSA_NAME.  */
	      if (TREE_CODE (lhs) != SSA_NAME)
		break;

	      if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
		{
		  /* Remove dead scalar statement.  */
		  if (has_zero_uses (lhs))
		    {
		      gsi_remove (&gsi_from, true);
		      continue;
		    }
		  break;
		}

	      /* Check that LHS does not have uses outside of STORE_BB.  */
	      res = true;
	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
		{
		  gimple *use_stmt;
		  use_stmt = USE_STMT (use_p);
		  if (is_gimple_debug (use_stmt))
		    continue;
		  if (gimple_bb (use_stmt) != store_bb)
		    {
		      res = false;
		      break;
		    }
		}
	      if (!res)
		break;

	      if (gimple_vuse (stmt1)
		  && gimple_vuse (stmt1) != gimple_vuse (last_store))
		break;

	      /* Can move STMT1 to STORE_BB.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "Move stmt to created bb\n");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
		}
	      gsi_move_before (&gsi_from, &gsi_to);
	      /* Shift GSI_TO for further insertion.  */
	      gsi_prev (&gsi_to);
	    }
	  /* Put other masked stores with the same mask to STORE_BB.  */
	  if (worklist.is_empty ()
	      || gimple_call_arg (worklist.last (), 2) != mask
	      || worklist.last () != stmt1)
	    break;
	  last = worklist.pop ();
	}
      add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
    }
}