/* Loop Vectorization
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "gimple-fold.h"
#include "tree-if-conv.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it had been manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.
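   For instance (an illustrative aside, not part of the original text),
   "simple (consecutive)" means unit-stride accesses such as:

        for (i=0; i<N; i++)
          a[i] = b[i];          <-- consecutive: supported

   whereas a strided access such as a[2*i] = b[i] does not have a simple
   access pattern in this sense.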
   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.
   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.
   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
   Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.
   Target modeling:
   =================
   Currently the only target-specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different vector sizes will, for now, need to
   specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.
   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.
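   A minimal sketch of that check (illustrative only; optab_handler,
   add_optab and CODE_FOR_nothing are the real names, but the surrounding
   logic is simplified here):

        machine_mode vec_mode = TYPE_MODE (vectype);
        if (optab_handler (add_optab, vec_mode) == CODE_FOR_nothing)
          return false;    <-- no target support, stmt is not vectorizable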
   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte
   elements, on a target with a vector size (VS) of 16 bytes, the VF is set
   to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.
   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
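/* As a concrete illustration of the strip-mining above (hand-written here,
   not produced by the vectorizer itself), assume 4-byte ints, a 16-byte
   vector size (so VF = 16/4 = 4) and N a multiple of VF:

        typedef int v4si __attribute__ ((vector_size (16)));

        void add (int *a, int *b, int *c, int n)
        {
          for (int i = 0; i < n; i += 4)
            {
              v4si vb = *(v4si *) (b + i);
              v4si vc = *(v4si *) (c + i);
              *(v4si *) (a + i) = vb + vc;
            }
        }
*/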
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned nbbs = loop->num_nodes;
  unsigned int vectorization_factor = 0;
  tree scalar_type = NULL_TREE;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  unsigned i;
  HOST_WIDE_INT dummy;
  gimple *stmt, *pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool analyze_pattern_stmt = false;
  auto_vec<stmt_vec_info> mask_producers;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_determine_vectorization_factor ===\n");
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          gphi *phi = si.phi ();
          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info)
              || STMT_VINFO_LIVE_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
                                 nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
           !gsi_end_p (si) || analyze_pattern_stmt;)
        {
          tree vf_vectype;

          if (analyze_pattern_stmt)
            stmt = pattern_stmt;
          else
            stmt = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (stmt);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if ((!STMT_VINFO_RELEVANT_P (stmt_info)
               && !STMT_VINFO_LIVE_P (stmt_info))
              || gimple_clobber_p (stmt))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (pattern_stmt);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "==> examining pattern statement: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                    }
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            analyze_pattern_stmt = true;
          /* If a pattern statement has def stmts, analyze them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple *pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> examining pattern def stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      analyze_pattern_stmt = false;
                    }
                }
              else
                analyze_pattern_stmt = false;
            }
          if (gimple_get_lhs (stmt) == NULL_TREE
              /* MASK_STORE has no lhs, but is ok.  */
              && (!is_gimple_call (stmt)
                  || !gimple_call_internal_p (stmt)
                  || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
            {
              if (is_gimple_call (stmt))
                {
                  /* Ignore calls with no lhs.  These must be calls to
                     #pragma omp simd functions, and what vectorization factor
                     it really needs can't be determined until
                     vectorizable_simd_clone_call.  */
                  if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: irregular stmt.");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: vector stmt in loop:");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }
              return false;
            }
          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts generated
                 by the vectorizer to represent/replace a certain
                 computation).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info)
                          || !gsi_end_p (pattern_def_si));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
              if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
                scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
              else
                scalar_type = TREE_TYPE (gimple_get_lhs (stmt));

              /* Bool ops don't participate in vectorization factor
                 computation.  For comparison use compared types to
                 compute a factor.  */
              if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
                  && is_gimple_assign (stmt)
                  && gimple_assign_rhs_code (stmt) != COND_EXPR)
                {
                  if (STMT_VINFO_RELEVANT_P (stmt_info)
                      || STMT_VINFO_LIVE_P (stmt_info))
                    mask_producers.safe_push (stmt_info);

                  if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
                      == tcc_comparison
                      && !VECT_SCALAR_BOOLEAN_TYPE_P
                            (TREE_TYPE (gimple_assign_rhs1 (stmt))))
                    scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
                  else
                    {
                      if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
                        {
                          pattern_def_seq = NULL;
                          gsi_next (&si);
                        }
                      continue;
                    }
                }
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }
            }

          /* Don't try to compute VF out of scalar types if the stmt
             produces a boolean vector.  Use result vectype instead.  */
          if (VECTOR_BOOLEAN_TYPE_P (vectype))
            vf_vectype = vectype;
          else
            {
              /* The vectorization factor is according to the smallest
                 scalar type (or the largest vector size, but we only
                 support one vector size per loop).  */
              scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                           &dummy);
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vf_vectype = get_vectype_for_scalar_type (scalar_type);
            }
          if (!vf_vectype)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: unsupported data-type ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     scalar_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: different sized vector "
                                   "types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vf_vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
              dump_printf (MSG_NOTE, "\n");
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;

          if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }
    }
  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
                     vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
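  /* Worked example (illustrative, not from the original sources): with a
     16-byte vector size, a stmt such as "int_x = (int) short_y + 1" makes
     the smallest scalar type a 2-byte short, whose vectype has 8 subparts;
     nunits == 8 then beats the 4 subparts contributed by 4-byte types, so
     the loop's vectorization factor becomes 8.  */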
  for (i = 0; i < mask_producers.length (); i++)
    {
      tree mask_type = NULL;

      stmt = STMT_VINFO_STMT (mask_producers[i]);

      if (is_gimple_assign (stmt)
          && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
          && !VECT_SCALAR_BOOLEAN_TYPE_P
                (TREE_TYPE (gimple_assign_rhs1 (stmt))))
        {
          scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
          mask_type = get_mask_type_for_scalar_type (scalar_type);

          if (!mask_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: unsupported mask\n");
              return false;
            }
        }
      else
        {
          tree rhs;
          ssa_op_iter iter;
          gimple *def_stmt;
          enum vect_def_type dt;

          FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
            {
              if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
                                       &def_stmt, &dt, &vectype))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: can't compute mask "
                                       "type for statement, ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                        0);
                    }
                  return false;
                }
              /* No vectype probably means external definition.
                 Allow it in case there is another operand which
                 allows to determine mask type.  */
              if (!vectype)
                continue;

              if (!mask_type)
                mask_type = vectype;
              else if (TYPE_VECTOR_SUBPARTS (mask_type)
                       != TYPE_VECTOR_SUBPARTS (vectype))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: different sized mask "
                                       "types in statement, ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         mask_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         vectype);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
                       != VECTOR_BOOLEAN_TYPE_P (vectype))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: mixed mask and "
                                       "nonmask vector types in statement, ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         mask_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         vectype);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
          /* We may compare boolean value loaded as vector of integers.
             Fix mask_type in such case.  */
          if (mask_type
              && !VECTOR_BOOLEAN_TYPE_P (mask_type)
              && gimple_code (stmt) == GIMPLE_ASSIGN
              && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
                 == tcc_comparison)
            mask_type = build_same_sized_truth_vector_type (mask_type);
        }

      /* No mask_type should mean loop invariant predicate.
         This is probably a subject for optimization in if-conversion.  */
      if (!mask_type)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not vectorized: can't compute mask type "
                               "for statement, ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                0);
            }
          return false;
        }

      STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type;
    }

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */
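/* For example (an illustrative note, not part of the original comment):
   for "for (i = k; i < n; i += 3)" scev reports the access function of i
   as the chrec {k, +, 3}_1, whose evolution part 3 is "simple"; an
   evolution whose step itself evolves, e.g. {k, +, {1, +, 1}_1}_1, is
   rejected by the tree_is_chrec check below.  */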
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
                             tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ", init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
          || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
              && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
          || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
              && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
                  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
          || !flag_associative_math))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "step unknown.\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<gimple *, 64> worklist;
  gphi_iterator gsi;
  bool double_reduc;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_scalar_cycles ===\n");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        {
          STRIP_NOPS (access_fn);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Access function of PHI: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
              dump_printf (MSG_NOTE, "\n");
            }
          STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
            = initial_condition_in_loop_num (access_fn, loop->num);
          STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
            = evolution_part_in_loop_num (access_fn, loop->num);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
          || (LOOP_VINFO_LOOP (loop_vinfo) != loop
              && TREE_CODE (step) != INTEGER_CST))
        {
          worklist.safe_push (phi);
          continue;
        }

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
                  != NULL_TREE);
      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }
  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple *phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple *reduc_stmt;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
        }

      gcc_assert (!virtual_operand_p (def)
                  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
                                                &double_reduc, false);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "Detected double reduction.\n");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                    vect_double_reduction_def;
            }
          else
            {
              if (loop != LOOP_VINFO_LOOP (loop_vinfo))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected vectorizable nested cycle.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                            vect_nested_cycle;
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected reduction.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                                                            vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP if it was not detected as reduction
                     chain.  */
                  if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
                    LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
                }
            }
        }
      else
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown def-use cycle pattern.\n");
    }
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Transfer group and reduction information from STMT to its pattern stmt.  */

static void
vect_fixup_reduc_chain (gimple *stmt)
{
  gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
  gimple *stmtp;
  gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
              && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
  GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
  do
    {
      stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      if (stmt)
        GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
          = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
    }
  while (stmt);
  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
}
/* Fixup scalar cycles that now have their stmts detected as patterns.  */

static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
  gimple *first;
  unsigned i;

  FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
    if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
      {
        gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
        while (next)
          {
            if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
              break;
            next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
          }
        /* If not all stmts in the chain are patterns, try to handle
           the chain without patterns.  */
        if (! next)
          {
            vect_fixup_reduc_chain (first);
            LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
              = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
          }
      }
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
   niter information holds in ASSUMPTIONS.

   Return the loop exit condition.  */
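/* For example (an illustrative note): a loop entered with i == 0 that runs
   "do { ... } while (++i != 16)" executes its latch 15 times, so
   NUMBER_OF_ITERATIONSM1 is 15, while NUMBER_OF_ITERATIONS, the number of
   header executions computed at the end of this function, is 16.  */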
static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
                      tree *number_of_iterations, tree *number_of_iterationsm1)
{
  edge exit = single_exit (loop);
  struct tree_niter_desc niter_desc;
  tree niter_assumptions, niter, may_be_zero;
  gcond *cond = get_loop_exit_condition (loop);

  *assumptions = boolean_true_node;
  *number_of_iterationsm1 = chrec_dont_know;
  *number_of_iterations = chrec_dont_know;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== get_loop_niters ===\n");

  if (!exit)
    return cond;

  niter = chrec_dont_know;
  may_be_zero = NULL_TREE;
  niter_assumptions = boolean_true_node;
  if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
      || chrec_contains_undetermined (niter_desc.niter))
    return cond;

  niter_assumptions = niter_desc.assumptions;
  may_be_zero = niter_desc.may_be_zero;
  niter = niter_desc.niter;

  if (may_be_zero && integer_zerop (may_be_zero))
    may_be_zero = NULL_TREE;

  if (may_be_zero)
    {
      if (COMPARISON_CLASS_P (may_be_zero))
        {
          /* Try to combine may_be_zero with assumptions, this can simplify
             computation of niter expression.  */
          if (niter_assumptions && !integer_nonzerop (niter_assumptions))
            niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
                                             niter_assumptions,
                                             fold_build1 (TRUTH_NOT_EXPR,
                                                          boolean_type_node,
                                                          may_be_zero));
          else
            niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
                                 build_int_cst (TREE_TYPE (niter), 0), niter);

          may_be_zero = NULL_TREE;
        }
      else if (integer_nonzerop (may_be_zero))
        {
          *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
          *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
          return cond;
        }
      else
        return cond;
    }

  *assumptions = niter_assumptions;
  *number_of_iterationsm1 = niter;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ???  For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niter && !chrec_contains_undetermined (niter))
    niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
                         build_int_cst (TREE_TYPE (niter), 1));
  *number_of_iterations = niter;

  return cond;
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  res->kind = vec_info::loop;
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *phi = gsi_stmt (si);
          gimple_set_uid (phi, 0);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res));
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          gimple_set_uid (stmt, 0);
          set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
        }
    }
  free (bbs);

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as reversed postorder traversal, so we are safe.  */

  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERSM1 (res) = NULL;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_NITERS_ASSUMPTIONS (res) = NULL;
  LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res) = vNULL;
  LOOP_VINFO_DATAREFS (res) = vNULL;
  LOOP_VINFO_DDRS (res) = vNULL;
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) = vNULL;
  LOOP_VINFO_MAY_ALIAS_DDRS (res) = vNULL;
  LOOP_VINFO_GROUPED_STORES (res) = vNULL;
  LOOP_VINFO_REDUCTIONS (res) = vNULL;
  LOOP_VINFO_REDUCTION_CHAINS (res) = vNULL;
  LOOP_VINFO_SLP_INSTANCES (res) = vNULL;
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
  LOOP_VINFO_PEELING_FOR_NITER (res) = false;
  LOOP_VINFO_OPERANDS_SWAPPED (res) = false;
  LOOP_VINFO_ORIG_LOOP_INFO (res) = NULL;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  bool swapped;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = clean_stmts ? loop->num_nodes : 0;
  swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple *stmt = gsi_stmt (si);

          /* We may have broken canonical form by moving a constant
             into RHS1 of a commutative op.  Fix such occurrences.  */
          if (swapped && is_gimple_assign (stmt))
            {
              enum tree_code code = gimple_assign_rhs_code (stmt);

              if ((code == PLUS_EXPR
                   || code == POINTER_PLUS_EXPR
                   || code == MULT_EXPR)
                  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
                swap_ssa_operands (stmt,
                                   gimple_assign_rhs1_ptr (stmt),
                                   gimple_assign_rhs2_ptr (stmt));
              else if (code == COND_EXPR
                       && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
                {
                  tree cond_expr = gimple_assign_rhs1 (stmt);
                  enum tree_code cond_code = TREE_CODE (cond_expr);

                  if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
                    {
                      bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
                                                                  0));
                      cond_code = invert_tree_comparison (cond_code,
                                                          honor_nans);
                      if (cond_code != ERROR_MARK)
                        {
                          TREE_SET_CODE (cond_expr, cond_code);
                          swap_ssa_operands (stmt,
                                             gimple_assign_rhs2_ptr (stmt),
                                             gimple_assign_rhs3_ptr (stmt));
                        }
                    }
                }
            }

          /* Free stmt_vec_info.  */
          free_stmt_vec_info (stmt);
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  vect_destroy_datarefs (loop_vinfo);
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
  LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
  LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, j, instance)
    vect_free_slp_instance (instance);

  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
  LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
  LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();

  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
  loop_vinfo->scalar_cost_vec.release ();

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Calculate the cost of one scalar iteration of the loop.  */
static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
  int innerloop_iters, i;

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
            continue;

          /* Skip stmts that are not vectorized inside the loop.  */
          if (stmt_info
              && !STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            continue;

          vect_cost_for_stmt kind;
          if (STMT_VINFO_DATA_REF (stmt_info))
            {
              if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
                kind = scalar_load;
              else
                kind = scalar_store;
            }
          else
            kind = scalar_stmt;

          scalar_single_iter_cost
            += record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
                                 factor, kind, stmt_info, 0, vect_prologue);
        }
    }
  LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo)
    = scalar_single_iter_cost;
}
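/* Worked example (illustrative): for an innermost loop whose body holds one
   load, one add and one store, the function above records one scalar_load,
   one scalar_stmt and one scalar_store, each with factor 1; blocks that
   belong to an inner loop use the FIXME factor of 50 as a stand-in for the
   unknown inner trip count.  */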
/* Function vect_analyze_loop_form_1.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough
   - the number of iterations can be analyzed, i.e., a countable loop.  The
     niter could be analyzed under some assumptions.  */
static bool
vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
                          tree *assumptions, tree *number_of_iterationsm1,
                          tree *number_of_iterations, gcond **inner_loop_cond)
{
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_form ===\n");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          return false;
        }

      if (empty_block_p (loop->header))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: empty loop.\n");
          return false;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple nested loops.\n");
          return false;
        }

      if (loop->num_nodes != 5)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          return false;
        }

      entryedge = loop_preheader_edge (innerloop);
      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: unsupported outerloop form.\n");
          return false;
        }

      /* Analyze the inner-loop.  */
      tree inner_niterm1, inner_niter, inner_assumptions;
      if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
                                      &inner_assumptions, &inner_niterm1,
                                      &inner_niter, NULL)
          /* Don't support analyzing niter under assumptions for inner
             loop.  */
          || !integer_onep (inner_assumptions))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: Bad inner loop.\n");
          return false;
        }

      if (!expr_invariant_in_loop_p (loop, inner_niter))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: inner-loop count not"
                             " invariant.\n");
          return false;
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Considering outer-loop vectorization.\n");
    }
  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
        {
          if (!single_exit (loop))
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple exits.\n");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: too many incoming edges.\n");
        }
      return false;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
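  /* E.g. (an illustrative note): earlier loop-header copying turns
     "while (i < n) { a[i++] = 0; }" into
     "if (i < n) do { a[i++] = 0; } while (i < n);",
     which is exactly the guarded do-while shape assumed here.  */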
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: latch block not empty.\n");
      return false;
    }
  /* Make sure the exit is not abnormal.  */
  edge e = single_exit (loop);
  if (e->flags & EDGE_ABNORMAL)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: abnormal loop exit edge.\n");
      return false;
    }

  *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
                                     number_of_iterationsm1);
  if (!*loop_cond)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: complicated exit condition.\n");
      return false;
    }

  if (integer_zerop (*assumptions)
      || !*number_of_iterations
      || chrec_contains_undetermined (*number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations cannot be "
                         "computed.\n");
      return false;
    }

  if (integer_zerop (*number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations = 0.\n");
      return false;
    }

  return true;
}
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form.  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  tree assumptions, number_of_iterations, number_of_iterationsm1;
  gcond *loop_cond, *inner_loop_cond = NULL;

  if (! vect_analyze_loop_form_1 (loop, &loop_cond,
                                  &assumptions, &number_of_iterationsm1,
                                  &number_of_iterations, &inner_loop_cond))
    return NULL;

  loop_vec_info loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
  if (!integer_onep (assumptions))
    {
      /* We consider to vectorize this loop by versioning it under
         some assumptions.  In order to do this, we need to clear
         existing information computed by scev and niter analyzer.  */
      scev_reset_htab ();
      free_numbers_of_iterations_estimates (loop);
      /* Also set flag for this loop so that following scev and niter
         analysis are done under the assumptions.  */
      loop_constraint_set (loop, LOOP_C_FINITE);
      /* Also record the assumptions for versioning.  */
      LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Symbolic number of iterations is ");
          dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
  if (inner_loop_cond)
    STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond))
      = loop_exit_ctrl_vec_info_type;

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Scan the loop stmts and, depending on whether there are any (non-)SLP
   statements, update the vectorization factor.  */

static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  unsigned int vectorization_factor;
  int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_update_vf_for_slp ===\n");

  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (vectorization_factor != 0);

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
     vectorization factor of the loop is the unrolling factor required by
     the SLP instances.  If that unrolling factor is 1, we say that we
     perform pure SLP on the loop - cross iteration parallelism is not
     exploited.  */
  bool only_slp_in_loop = true;
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          if (STMT_VINFO_IN_PATTERN_P (stmt_info)
              && STMT_VINFO_RELATED_STMT (stmt_info))
            {
              stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              stmt_info = vinfo_for_stmt (stmt);
            }
          if ((STMT_VINFO_RELEVANT_P (stmt_info)
               || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    }

  if (only_slp_in_loop)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "Loop contains only SLP stmts\n");
      vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
    }
  else
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "Loop contains SLP and non-SLP stmts\n");
      vectorization_factor
        = least_common_multiple (vectorization_factor,
                                 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
    }

  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Updating vectorization factor to %d\n",
                     vectorization_factor);
}
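/* Worked example (illustrative): if loop-based analysis settled on a
   vectorization factor of 4 but an SLP instance requires an unrolling
   factor of 8, the update above yields least_common_multiple (4, 8) == 8,
   so one vector iteration covers both forms of parallelism; with only SLP
   stmts the SLP unrolling factor is used directly.  */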
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  bool ok;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_operations ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          gphi *phi = si.phi ();
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }
          if (virtual_operand_p (gimple_phi_result (phi)))
            continue;

          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), cause this case
                 requires to actually do something here.  */
              if (STMT_VINFO_LIVE_P (stmt_info)
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Unsupported loop-closed phi in "
                                     "outer-loop.\n");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
                  gimple *op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
                  if (gimple_nop_p (op_def_stmt)
                      || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
                      || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                           != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }

          gcc_assert (stmt_info);

          if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
               || STMT_VINFO_LIVE_P (stmt_info))
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: scalar dependence cycle.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
                  && ! PURE_SLP_STMT (stmt_info))
                ok = vectorizable_induction (phi, NULL, NULL, NULL);
              else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
                        || STMT_VINFO_DEF_TYPE (stmt_info)
                           == vect_nested_cycle)
                       && ! PURE_SLP_STMT (stmt_info))
                ok = vectorizable_reduction (phi, NULL, NULL, NULL);
            }

          if (ok && STMT_VINFO_LIVE_P (stmt_info))
            ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL);

          if (!ok)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: relevant phi not "
                                   "supported: ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
                }
              return false;
            }
        }

      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
           gsi_next (&si))
        {
          gimple *stmt = gsi_stmt (si);
          if (!gimple_clobber_p (stmt)
              && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "All the computation can be taken out of the loop.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: redundant loop. no profit to "
                         "vectorize.\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */

static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
{
  bool ok;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;
  unsigned int n_stmts = 0;

  /* The first group of checks is independent of the vector size.  */
  fatal = true;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  */

  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);

  loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
  if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: loop nest containing two "
                         "or more consecutive inner loops cannot be "
                         "vectorized\n");
      return false;
    }

  for (unsigned i = 0; i < loop->num_nodes; i++)
    for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
         !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple *stmt = gsi_stmt (gsi);
        if (is_gimple_debug (stmt))
          continue;
        ++n_stmts;
        if (!find_data_references_in_stmt (loop, stmt,
                                           &LOOP_VINFO_DATAREFS (loop_vinfo)))
          {
            if (is_gimple_call (stmt) && loop->safelen)
              {
                tree fndecl = gimple_call_fndecl (stmt), op;
                if (fndecl != NULL_TREE)
                  {
                    cgraph_node *node = cgraph_node::get (fndecl);
                    if (node != NULL && node->simd_clones != NULL)
                      {
                        unsigned int j, n = gimple_call_num_args (stmt);
                        for (j = 0; j < n; j++)
                          {
                            op = gimple_call_arg (stmt, j);
                            if (DECL_P (op)
                                || (REFERENCE_CLASS_P (op)
                                    && get_base_address (op)))
                              break;
                          }
                        op = gimple_call_lhs (stmt);
                        /* Ignore #pragma omp declare simd functions
                           if they don't have data references in the
                           call stmt itself.  */
                        if (j == n
                            && !(op
                                 && (DECL_P (op)
                                     || (REFERENCE_CLASS_P (op)
                                         && get_base_address (op)))))
                          continue;
                      }
                  }
              }
            if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not vectorized: loop contains function "
                               "calls or data references that cannot "
                               "be analyzed\n");
            return false;
          }
      }

  /* Analyze the data references and also adjust the minimal
     vectorization factor according to the loads and stores.  */

  ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data references.\n");
      return false;
    }
  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */
  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  vect_fixup_scalar_cycles_with_patterns (loop_vinfo);

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data access.\n");
      return false;
    }

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unexpected pattern.\n");
      return false;
    }

  /* While the rest of the analysis below depends on it in some way.  */
  fatal = false;

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't determine vectorization factor.\n");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }
  /* Compute the scalar iteration cost.  */
  vect_compute_single_scalar_iteration_cost (loop_vinfo);

  int saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  HOST_WIDE_INT estimated_niter;
  unsigned th;
  int min_scalar_loop_bound;

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, n_stmts);
  if (!ok)
    return false;

  /* If there are any SLP instances mark them as pure_slp.  */
  bool slp = vect_make_slp_decision (loop_vinfo);
  if (slp)
    {
      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);

      /* Update the vectorization factor based on the SLP decision.  */
      vect_update_vf_for_slp (loop_vinfo);
    }
  /* This is the point where we can re-start analysis with SLP forced off.  */
start_over:

  /* Now the vectorization factor is final.  */
  unsigned vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  gcc_assert (vectorization_factor != 0);

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorization_factor = %d, niters = "
                     HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
                     LOOP_VINFO_INT_NITERS (loop_vinfo));

  HOST_WIDE_INT max_niter
    = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
  if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
      || (max_niter != -1
          && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: iteration count smaller than "
                         "vectorization factor.\n");
      return false;
    }
  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    return false;

  /* Do not invoke vect_enhance_data_refs_alignment for epilogue
     vectorization.  */
  if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    {
      /* This pass will decide on using loop versioning and/or loop peeling
         in order to enhance the alignment of data references in the
         loop.  */
      ok = vect_enhance_data_refs_alignment (loop_vinfo);
      if (!ok)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "bad data alignment.\n");
          return false;
        }
    }
  if (slp)
    {
      /* Analyze operations in the SLP instances.  Note this may
         remove unsupported SLP instances which makes the above
         SLP kind detection invalid.  */
      unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
      vect_slp_analyze_operations (LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
                                   LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
      if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
        goto again;
    }
  /* Scan all the remaining operations in the loop that are not subject
     to SLP and make sure they are vectorizable.  */
  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad operation or unsupported loop bound.\n");
      return false;
    }
  /* If epilog loop is required because of data accesses with gaps,
     one additional iteration needs to be peeled.  Check if there is
     enough iterations for vectorization.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);

      if (wi::to_widest (scalar_niters) < vf)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "loop has not enough iterations to support"
                             " peeling for gaps.\n");
          return false;
        }
    }
  /* Analyze cost.  Decide if worthwhile to vectorize.  */
  int min_profitable_estimate, min_profitable_iters;
  vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
                                      &min_profitable_estimate);

  if (min_profitable_iters < 0)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vector version will never be "
                         "profitable.\n");
      goto again;
    }

  min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                           * vectorization_factor);
  /* Use the cost model only if it is more conservative than user specified
     threshold.  */
  th = (unsigned) MAX (min_scalar_loop_bound, min_profitable_iters);

  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: iteration count smaller than user "
                         "specified loop bound parameter or minimum profitable "
                         "iterations (whichever is more conservative).\n");
      goto again;
    }
  estimated_niter
    = estimated_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
  if (estimated_niter == -1)
    estimated_niter = max_niter;
  if (estimated_niter != -1
      && ((unsigned HOST_WIDE_INT) estimated_niter
          < MAX (th, (unsigned) min_profitable_estimate)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: estimated iteration count too "
                         "small.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: estimated iteration count smaller "
                         "than specified loop bound parameter or minimum "
                         "profitable iterations (whichever is more "
                         "conservative).\n");
      goto again;
    }
  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo)
         / LOOP_VINFO_VECT_FACTOR (loop_vinfo))
        * LOOP_VINFO_VECT_FACTOR (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
    {
      if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
                   - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
          < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
        LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
           || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
               < (unsigned) exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
               /* In case of versioning, check if the maximum number of
                  iterations is greater than th.  If they are identical,
                  the epilogue is unnecessary.  */
               && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
                   || (unsigned HOST_WIDE_INT) max_niter > th)))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
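  /* Example (illustrative): with a vectorization factor of 8, a known trip
     count of 100 and 3 iterations peeled for alignment, 100 - 3 = 97
     iterations remain; ctz_hwi (97) = 0 is smaller than exact_log2 (8) = 3,
     so 97 is not a multiple of 8 and PEELING_FOR_NITER is set to get an
     epilogue loop for the 97 % 8 = 1 leftover iteration.  */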
  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
          || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
                                           single_exit (LOOP_VINFO_LOOP
                                                        (loop_vinfo))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: can't create required "
                             "epilog loop\n");
          goto again;
        }
    }
  /* During peeling, we need to check if number of loop iterations is
     enough for both peeled prolog loop and vector loop.  This check
     can be merged along with threshold check of loop versioning, so
     increase threshold for this case if necessary.  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo)
      && (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
          || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)))
    {
      unsigned niters_th;

      /* Niters for peeled prolog loop.  */
      if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
        {
          struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
          tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
          niters_th = TYPE_VECTOR_SUBPARTS (vectype) - 1;
        }
      else
        niters_th = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);

      /* Niters for at least one iteration of vectorized loop.  */
      niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
      /* One additional iteration because of peeling for gap.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
        niters_th++;
      if (LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) < niters_th)
        LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = niters_th;
    }

  gcc_assert (vectorization_factor
              == (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo));

  /* Ok to vectorize!  */
  return true;
again:
  /* Try again with SLP forced off but if we didn't do any SLP there is
     no point in re-trying.  */
  if (!slp)
    return false;

  /* If there are reduction chains re-trying will fail anyway.  */
  if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
    return false;
  /* Likewise if the grouped loads or stores in the SLP cannot be handled
     via interleaving or lane instructions.  */
  slp_instance instance;
  slp_tree node;
  unsigned i, j;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    {
      stmt_vec_info vinfo;
      vinfo = vinfo_for_stmt
          (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
      if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
        continue;
      vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
      unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
      tree vectype = STMT_VINFO_VECTYPE (vinfo);
      if (! vect_store_lanes_supported (vectype, size)
          && ! vect_grouped_store_supported (vectype, size))
        return false;
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
        {
          vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
          vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
          bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo);
          size = STMT_VINFO_GROUP_SIZE (vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo);
          if (! vect_load_lanes_supported (vectype, size)
              && ! vect_grouped_load_supported (vectype, single_element_p,
                                                size))
            return false;
        }
    }
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "re-trying with SLP disabled\n");

  /* Roll back state appropriately.  No SLP this time.  */
  slp = false;
  /* Restore vectorization factor as it were without SLP.  */
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
  /* Free the SLP instances.  */
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
    vect_free_slp_instance (instance);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Reset SLP type to loop_vect on all stmts.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    {
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator si = gsi_start_phis (bb);
           !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
          STMT_SLP_TYPE (stmt_info) = loop_vect;
        }
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
           !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
          STMT_SLP_TYPE (stmt_info) = loop_vect;
          if (STMT_VINFO_IN_PATTERN_P (stmt_info))
            {
              stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
              STMT_SLP_TYPE (stmt_info) = loop_vect;
              for (gimple_stmt_iterator pi
                     = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
                   !gsi_end_p (pi); gsi_next (&pi))
                {
                  gimple *pstmt = gsi_stmt (pi);
                  STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect;
                }
            }
        }
    }
  /* Free optimized alias test DDRS.  */
  LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
  /* Reset target cost data.  */
  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
  LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
    = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
  /* Reset assorted flags.  */
  LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
  LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;

  goto start_over;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  If ORIG_LOOP_VINFO is not NULL epilogue must
   be vectorized.  */
loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "===== analyze_loop_nest =====\n");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop already vectorized.\n");
      return NULL;
    }
  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "bad loop form.\n");
          return NULL;
        }

      bool fatal = false;

      if (orig_loop_vinfo)
        LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;

      if (vect_analyze_loop_2 (loop_vinfo, fatal))
        {
          LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

          return loop_vinfo;
        }

      destroy_loop_vec_info (loop_vinfo, true);

      vector_sizes &= ~current_vector_size;
      if (fatal
          || vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "***** Re-trying analysis with "
                         "vector size %d\n", current_vector_size);
    }
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of the reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result, or ERROR_MARK
      if the operation is a supported reduction operation, but does not have
      such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as reduction.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
    case MAX_EXPR:
      *reduc_code = REDUC_MAX_EXPR;
      return true;

    case MIN_EXPR:
      *reduc_code = REDUC_MIN_EXPR;
      return true;

    case PLUS_EXPR:
      *reduc_code = REDUC_PLUS_EXPR;
      return true;

    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
      *reduc_code = ERROR_MARK;
      return true;

    default:
      return false;
    }
}
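/* Usage sketch (hypothetical caller):

     enum tree_code reduc_code;
     if (reduction_code_for_scalar_code (MAX_EXPR, &reduc_code))
       ... reduc_code is now REDUC_MAX_EXPR ...

   whereas for MULT_EXPR the function also returns true but stores
   ERROR_MARK, signalling that the vector of partial products must be
   reduced in the epilogue by other means.  */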
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}
/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */
static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
                       gimple *first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
          gimple *use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          /* Check if we got back to the reduction phi.  */
          if (use_stmt == phi)
            {
              loop_use_stmt = use_stmt;
              found = true;
              break;
            }

          if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
            {
              loop_use_stmt = use_stmt;
              nloop_uses++;
            }
          else
            n_out_of_loop_uses++;

          /* There can be either a single use in the loop or two uses in
             phi nodes.  */
          if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
            return false;
        }

      if (found)
        break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
        return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
        return false;

      if (!is_gimple_assign (loop_use_stmt)
          || code != gimple_assign_rhs_code (loop_use_stmt)
          || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
        return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = vinfo_for_stmt (loop_use_stmt);
      if (current_stmt)
        {
          current_stmt_info = vinfo_for_stmt (current_stmt);
          GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
          GROUP_FIRST_ELEMENT (use_stmt_info)
            = GROUP_FIRST_ELEMENT (current_stmt_info);
        }
      else
        GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;
  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
        {
          tree op = gimple_assign_rhs1 (next_stmt);
          gimple *def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              lhs = gimple_assign_lhs (next_stmt);
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              continue;
            }

          return false;
        }
      else
        {
          tree op = gimple_assign_rhs2 (next_stmt);
          gimple *def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "swapping oprnds: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
                }

              swap_ssa_operands (next_stmt,
                                 gimple_assign_rhs1_ptr (next_stmt),
                                 gimple_assign_rhs2_ptr (next_stmt));
              update_stmt (next_stmt);

              if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
                LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
            }
          else
            return false;
        }

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
  GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}
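/* As a concrete source-level example, the chain detection above matches

     s = s0;
     for (i = 0; i < N; i++)
       {
         s = s + a[i];
         s = s + b[i];
       }

   where the two additions form a reduction chain of size 2 hanging off
   the reduction PHI for 's'.  */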
/* Function vect_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   or

   a3 = ...
   loop_header:
     a1 = phi < a0, a2 >
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   (4) Detect condition expressions, ie:
     for (int i = 0; i < N; i++)
       if (a[i] < val)
         ret_val = a[i];
*/
static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
                          bool *double_reduc,
                          bool need_wrapping_integral_overflow,
                          enum vect_reduction_type *v_reduc_type)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  tree name;
  int nloop_uses;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;
  *v_reduc_type = TREE_CODE_REDUCTION;

  name = PHI_RESULT (phi);
  /* ??? If there are no uses of the PHI result the inner loop reduction
     won't be detected as possibly double-reduction by vectorizable_reduction
     because that tries to walk the PHI arg from the preheader edge which
     can be constant.  See PR60382.  */
  if (has_zero_uses (name))
    return NULL;

  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;

      if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "intermediate value used outside loop.\n");
          return NULL;
        }

      nloop_uses++;
      if (nloop_uses > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "reduction value used in loop.\n");
          return NULL;
        }

      phi_use_stmt = use_stmt;
    }
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "reduction: not ssa_name: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return NULL;
    }
  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else if (gimple_code (def_stmt) == GIMPLE_PHI)
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }
  else
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "reduction: unhandled reduction operation: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0);
        }
      return NULL;
    }
  nloop_uses = 0;
  auto_vec<gphi *, 3> lcphis;
  if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
      {
        gimple *use_stmt = USE_STMT (use_p);
        if (is_gimple_debug (use_stmt))
          continue;
        if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
          nloop_uses++;
        else
          /* We can have more than one loop-closed PHI.  */
          lcphis.safe_push (as_a <gphi *> (use_stmt));
        if (nloop_uses > 1)
          {
            if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "reduction used in loop.\n");
            return NULL;
          }
      }
  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported phi node definition.\n");
          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (gimple_bb (def1)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1)
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
                            "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }
  /* If we are vectorizing an inner reduction we are executing that
     in the original order only in case we are not dealing with a
     double reduction.  */
  bool check_reduction = true;
  if (flow_loop_nested_p (vect_loop, loop))
    {
      gphi *lcphi;
      unsigned i;
      check_reduction = false;
      FOR_EACH_VEC_ELT (lcphis, i, lcphi)
        FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
          {
            gimple *use_stmt = USE_STMT (use_p);
            if (is_gimple_debug (use_stmt))
              continue;
            if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
              check_reduction = true;
          }
    }

  bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
  code = orig_code = gimple_assign_rhs_code (def_stmt);

  /* We can handle "res -= x[i]", which is non-associative by
     simply rewriting this into "res += -x[i]".  Avoid changing
     gimple instruction for the first simple tests and only do this
     if we're allowed to change code at all.  */
  if (code == MINUS_EXPR
      && (op1 = gimple_assign_rhs1 (def_stmt))
      && TREE_CODE (op1) == SSA_NAME
      && SSA_NAME_DEF_STMT (op1) == phi)
    code = PLUS_EXPR;
  if (code == COND_EXPR)
    {
      if (! nested_in_vect_loop)
        *v_reduc_type = COND_REDUCTION;

      op3 = gimple_assign_rhs1 (def_stmt);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = gimple_assign_rhs2 (def_stmt);
      op2 = gimple_assign_rhs3 (def_stmt);
    }
  else if (!commutative_tree_code (code) || !associative_tree_code (code))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: not commutative/associative: ");
      return NULL;
    }
  else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);
    }
  else
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: not handled operation: ");
      return NULL;
    }

  if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: both uses not ssa_names: ");

      return NULL;
    }
  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "reduction: multiple types: operation type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
          dump_printf (MSG_NOTE, ", operands types: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
                             TREE_TYPE (op1));
          dump_printf (MSG_NOTE, ",");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
                             TREE_TYPE (op2));
          if (op3)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 TREE_TYPE (op3));
            }
          if (op4)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 TREE_TYPE (op4));
            }
          dump_printf (MSG_NOTE, "\n");
        }

      return NULL;
    }
  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */
  if (*v_reduc_type != COND_REDUCTION
      && check_reduction)
    {
      /* CHECKME: check for !flag_finite_math_only too?  */
      if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math)
        {
          /* Changing the order of operations changes the semantics.  */
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: unsafe fp math optimization: ");
          return NULL;
        }
      else if (INTEGRAL_TYPE_P (type))
        {
          if (!operation_no_trapping_overflow (type, code))
            {
              /* Changing the order of operations changes the semantics.  */
              if (dump_enabled_p ())
                report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                                "reduction: unsafe int math optimization"
                                " (overflow traps): ");
              return NULL;
            }
          if (need_wrapping_integral_overflow
              && !TYPE_OVERFLOW_WRAPS (type)
              && operation_can_overflow (code))
            {
              /* Changing the order of operations changes the semantics.  */
              if (dump_enabled_p ())
                report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                                "reduction: unsafe int math optimization"
                                " (overflow doesn't wrap): ");
              return NULL;
            }
        }
      else if (SAT_FIXED_POINT_TYPE_P (type))
        {
          /* Changing the order of operations changes the semantics.  */
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: unsafe fixed-point math optimization: ");
          return NULL;
        }
    }
  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }
  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
          || !def1 || gimple_nop_p (def1)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
                  || is_gimple_call (def1)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                       == vect_induction_def
                  || (gimple_code (def1) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt;
    }
  if (def1 && def1 == phi
      && (code == COND_EXPR
          || !def2 || gimple_nop_p (def2)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
          || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
              && (is_gimple_assign (def2)
                  || is_gimple_call (def2)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                       == vect_induction_def
                  || (gimple_code (def2) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
        {
          /* Check if we can swap operands (just for simplicity - so that
             the rest of the code can assume that the reduction variable
             is always the last (second) argument).  */
          if (code == COND_EXPR)
            {
              /* Swap cond_expr by inverting the condition.  */
              tree cond_expr = gimple_assign_rhs1 (def_stmt);
              enum tree_code invert_code = ERROR_MARK;
              enum tree_code cond_code = TREE_CODE (cond_expr);

              if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
                {
                  bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
                  invert_code = invert_tree_comparison (cond_code, honor_nans);
                }
              if (invert_code != ERROR_MARK)
                {
                  TREE_SET_CODE (cond_expr, invert_code);
                  swap_ssa_operands (def_stmt,
                                     gimple_assign_rhs2_ptr (def_stmt),
                                     gimple_assign_rhs3_ptr (def_stmt));
                }
              else
                {
                  if (dump_enabled_p ())
                    report_vect_op (MSG_NOTE, def_stmt,
                                    "detected reduction: cannot swap operands "
                                    "for cond_expr");
                  return NULL;
                }
            }
          else
            swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                               gimple_assign_rhs2_ptr (def_stmt));

          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
                            "detected reduction: need to swap operands: ");

          if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
            LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
        }
      else
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }
  /* Try to find SLP reduction chain.  */
  if (! nested_in_vect_loop
      && code != COND_EXPR
      && vect_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt,
                        "reduction: detected reduction chain: ");

      return def_stmt;
    }

  if (dump_enabled_p ())
    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                    "reduction: unknown pattern: ");

  return NULL;
}
/* Wrapper around vect_is_simple_reduction, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

gimple *
vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
                             bool *double_reduc,
                             bool need_wrapping_integral_overflow)
{
  enum vect_reduction_type v_reduc_type;
  gimple *def = vect_is_simple_reduction (loop_info, phi, double_reduc,
                                          need_wrapping_integral_overflow,
                                          &v_reduc_type);
  if (def)
    {
      stmt_vec_info reduc_def_info = vinfo_for_stmt (phi);
      STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
      STMT_VINFO_REDUC_DEF (reduc_def_info) = def;
      reduc_def_info = vinfo_for_stmt (def);
      STMT_VINFO_REDUC_DEF (reduc_def_info) = phi;
    }
  return def;
}
/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */

int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
                             int *peel_iters_epilogue,
                             stmt_vector_for_cost *scalar_cost_vec,
                             stmt_vector_for_cost *prologue_cost_vec,
                             stmt_vector_for_cost *epilogue_cost_vec)
{
  int retval = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = vf/2;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "cost model: epilogue peel iters set to vf/2 "
                         "because loop iterations are unknown.\n");

      /* If peeled iterations are known but number of scalar loop
         iterations are unknown, count a taken branch per peeled loop.  */
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
                                 NULL, 0, vect_prologue);
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
                                 NULL, 0, vect_epilogue);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      peel_iters_prologue = niters < peel_iters_prologue ?
                            niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
         peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
        *peel_iters_epilogue = vf;
    }

  stmt_info_for_cost *si;
  int j;
  if (peel_iters_prologue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
        stmt_vec_info stmt_info
          = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
        retval += record_stmt_cost (prologue_cost_vec,
                                    si->count * peel_iters_prologue,
                                    si->kind, stmt_info, si->misalign,
                                    vect_prologue);
      }
  if (*peel_iters_epilogue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      {
        stmt_vec_info stmt_info
          = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
        retval += record_stmt_cost (epilogue_cost_vec,
                                    si->count * *peel_iters_epilogue,
                                    si->kind, stmt_info, si->misalign,
                                    vect_epilogue);
      }

  return retval;
}
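/* Worked example (hypothetical numbers): with vf = 4, known niters = 23
   and peel_iters_prologue = 3, the function above computes
   *peel_iters_epilogue = (23 - 3) % 4 = 0; if PEELING_FOR_GAPS is set,
   the epilogue is forced back up to vf = 4 iterations, and each entry of
   SCALAR_COST_VEC is then charged 3 times to the prologue and 4 times to
   the epilogue cost vectors.  */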
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
   of iterations for vectorization.  -1 value means loop vectorization
   is not profitable.  This returned value may be used for dynamic
   profitability check.

   *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
   for static check against estimated number of iterations.  */
static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
                                    int *ret_min_profitable_niters,
                                    int *ret_min_profitable_estimate)
{
  int min_profitable_iters;
  int min_profitable_estimate;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  unsigned vec_inside_cost = 0;
  int vec_outside_cost = 0;
  unsigned vec_prologue_cost = 0;
  unsigned vec_epilogue_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  /* Cost model disabled.  */
  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    {
      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
      *ret_min_profitable_niters = 0;
      *ret_min_profitable_estimate = 0;
      return;
    }
  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning aliasing.\n");
    }

  /* Requires loop versioning with niter checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
    {
      /* FIXME: Make cost depend on complexity of individual check.  */
      (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning niters.\n");
    }

  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
                          vect_prologue);
  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  scalar_single_iter_cost
    = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (npeel < 0)
    {
      peel_iters_prologue = vf/2;
      dump_printf (MSG_NOTE, "cost model: "
                   "prologue peel iters set to vf/2.\n");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
         unknown.  */
      peel_iters_epilogue = vf/2;
      dump_printf (MSG_NOTE, "cost model: "
                   "epilogue peel iters set to vf/2 because "
                   "peeling for alignment is unknown.\n");

      /* If peeled iterations are unknown, count a taken branch and a not taken
         branch per peeled loop.  Even if scalar loop iterations are known,
         vector iterations are not known since peeled prologue iterations are
         not known.  Hence guards remain the same.  */
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
                            NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
                            NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
                            NULL, 0, vect_epilogue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
                            NULL, 0, vect_epilogue);
      stmt_info_for_cost *si;
      int j;
      FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (target_cost_data,
                                si->count * peel_iters_prologue,
                                si->kind, stmt_info, si->misalign,
                                vect_prologue);
          (void) add_stmt_cost (target_cost_data,
                                si->count * peel_iters_epilogue,
                                si->kind, stmt_info, si->misalign,
                                vect_epilogue);
        }
    }
  else
    {
      stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
      stmt_info_for_cost *si;
      int j;
      void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

      prologue_cost_vec.create (2);
      epilogue_cost_vec.create (2);
      peel_iters_prologue = npeel;

      (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
                                          &peel_iters_epilogue,
                                          &LOOP_VINFO_SCALAR_ITERATION_COST
                                            (loop_vinfo),
                                          &prologue_cost_vec,
                                          &epilogue_cost_vec);

      FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_prologue);
        }

      FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_epilogue);
        }

      prologue_cost_vec.release ();
      epilogue_cost_vec.release ();
    }
  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */
  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
        scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
              + vect_get_stmt_cost (cond_branch_not_taken);
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
        }
    }
  /* Complete the target-specific cost calculations.  */
  finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
                   vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
                   vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
                   scalar_single_iter_cost);
      dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
                   scalar_outside_cost);
      dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
                   vec_outside_cost);
      dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
                   peel_iters_prologue);
      dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
                   peel_iters_epilogue);
    }
  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
     SOC = scalar outside cost for run time cost model check.  */

  if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
    {
      if (vec_outside_cost <= 0)
        min_profitable_iters = 0;
      else
        {
          min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
                                  - vec_inside_cost * peel_iters_prologue
                                  - vec_inside_cost * peel_iters_epilogue)
                                 / ((scalar_single_iter_cost * vf)
                                    - vec_inside_cost);

          if ((scalar_single_iter_cost * vf * min_profitable_iters)
              <= (((int) vec_inside_cost * min_profitable_iters)
                  + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
            min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
        warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
                    "did not happen for a simd loop");

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cost model: the vector iteration cost = %d "
                         "divided by the scalar iteration cost = %d "
                         "is greater or equal to the vectorization factor = %d"
                         ".\n",
                         vec_inside_cost, scalar_single_iter_cost, vf);
      *ret_min_profitable_niters = -1;
      *ret_min_profitable_estimate = -1;
      return;
    }
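  /* Worked example (illustrative numbers only): with SIC = 4, VIC = 8,
     VOC = 20, SOC = 0, VF = 4 and no peeling, the division above yields
     (20 * 4) / (4 * 4 - 8) = 10; at exactly 10 iterations both versions
     cost the same, so the correction bumps the result to 11, and the
     clamp to at least VF just below leaves it unchanged.  */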
  dump_printf (MSG_NOTE,
               "  Calculated minimum iters for profitability: %d\n",
               min_profitable_iters);

  min_profitable_iters =
        min_profitable_iters < vf ? vf : min_profitable_iters;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Runtime profitability threshold = %d\n",
                     min_profitable_iters);

  *ret_min_profitable_niters = min_profitable_iters;
  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.

     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition
     must hold true:
     SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */

  if (vec_outside_cost <= 0)
    min_profitable_estimate = 0;
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
                                 - vec_inside_cost * peel_iters_prologue
                                 - vec_inside_cost * peel_iters_epilogue)
                                / ((scalar_single_iter_cost * vf)
                                   - vec_inside_cost);
    }
  min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Static estimate profitability threshold = %d\n",
                     min_profitable_estimate);

  *ret_min_profitable_estimate = min_profitable_estimate;
}
/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
   vector elements (not bits) for a vector of mode MODE.  */

static void
calc_vec_perm_mask_for_shift (machine_mode mode, unsigned int offset,
                              unsigned char *sel)
{
  unsigned int i, nelt = GET_MODE_NUNITS (mode);

  for (i = 0; i < nelt; i++)
    sel[i] = (i + offset) & (2 * nelt - 1);
}
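/* For example (illustrative): for a 4-element vector mode and OFFSET = 1
   the function produces the selector {1, 2, 3, 4}: each element is taken
   from its right-hand neighbour, with the last element coming from the
   second input vector of the two-operand vec_perm.  */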
/* Checks whether the target supports whole-vector shifts for vectors of mode
   MODE.  This is the case if _either_ the platform handles vec_shr_optab, _or_
   it supports vec_perm_const with masks for all necessary shift amounts.  */

static bool
have_whole_vector_shift (machine_mode mode)
{
  if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    return true;

  if (direct_optab_handler (vec_perm_const_optab, mode) == CODE_FOR_nothing)
    return false;

  unsigned int i, nelt = GET_MODE_NUNITS (mode);
  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);

  for (i = nelt / 2; i >= 1; i /= 2)
    {
      calc_vec_perm_mask_for_shift (mode, i, sel);
      if (!can_vec_perm_p (mode, false, sel))
        return false;
    }
  return true;
}
/* Return the reduction operand (with index REDUC_INDEX) of STMT.  */

static tree
get_reduction_op (gimple *stmt, int reduc_index)
{
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
                  == ternary_op);
      return TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
    case GIMPLE_UNARY_RHS:
      return gimple_assign_rhs1 (stmt);
    case GIMPLE_BINARY_RHS:
      return (reduc_index
              ? gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt));
    case GIMPLE_TERNARY_RHS:
      return gimple_op (stmt, reduc_index + 1);
    default:
      gcc_unreachable ();
    }
}
/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */
static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
                           int ncopies)
{
  int prologue_cost = 0, epilogue_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple *orig_stmt;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  void *target_cost_data;

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
    }
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));
  /* Condition reductions generate two reductions in the loop.  */
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    ncopies *= 2;

  /* Cost of reduction op inside loop.  */
  unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                                        stmt_info, 0, vect_body);

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.
     For cond reduction we have four vectors: initial index, step, initial
     result of the data reduction, initial value of the index reduction.  */
  int prologue_stmts = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
                       == COND_REDUCTION ? 4 : 1;
  prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
                                  scalar_to_vec, stmt_info, 0,
                                  vect_prologue);

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
        {
          if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
            {
              /* An EQ stmt and an COND_EXPR stmt.  */
              epilogue_cost += add_stmt_cost (target_cost_data, 2,
                                              vector_stmt, stmt_info, 0,
                                              vect_epilogue);
              /* Reduction of the max index and a reduction of the found
                 values.  */
              epilogue_cost += add_stmt_cost (target_cost_data, 2,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
              /* A broadcast of the max value.  */
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              scalar_to_vec, stmt_info, 0,
                                              vect_epilogue);
            }
          else
            {
              epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                              stmt_info, 0, vect_epilogue);
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
            }
        }
      else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
        {
          unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);
          /* Extraction of scalar elements.  */
          epilogue_cost += add_stmt_cost (target_cost_data, 2 * nunits,
                                          vec_to_scalar, stmt_info, 0,
                                          vect_epilogue);
          /* Scalar max reductions via COND_EXPR / MAX_EXPR.  */
          epilogue_cost += add_stmt_cost (target_cost_data, 2 * nunits - 3,
                                          scalar_stmt, stmt_info, 0,
                                          vect_epilogue);
        }
      else
        {
          int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          tree bitsize =
            TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
          int element_bitsize = tree_to_uhwi (bitsize);
          int nelements = vec_size_in_bits / element_bitsize;

          if (code == COND_EXPR)
            code = MAX_EXPR;

          optab = optab_for_tree_code (code, vectype, optab_default);

          /* We have a whole vector shift available.  */
          if (optab != unknown_optab
              && VECTOR_MODE_P (mode)
              && optab_handler (optab, mode) != CODE_FOR_nothing
              && have_whole_vector_shift (mode))
            {
              /* Final reduction via vector shifts and the reduction operator.
                 Also requires scalar extract.  */
              epilogue_cost += add_stmt_cost (target_cost_data,
                                              exact_log2 (nelements) * 2,
                                              vector_stmt, stmt_info, 0,
                                              vect_epilogue);
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
            }
          else
            /* Use extracts and reduction op for final reduction.  For N
               elements, we have N extracts and N-1 reduction ops.  */
            epilogue_cost += add_stmt_cost (target_cost_data,
                                            nelements + nelements - 1,
                                            vector_stmt, stmt_info, 0,
                                            vect_epilogue);
        }
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "vect_model_reduction_cost: inside_cost = %d, "
                 "prologue_cost = %d, epilogue_cost = %d.\n", inside_cost,
                 prologue_cost, epilogue_cost);

  return true;
}
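/* Example (illustrative): for a V4SI add reduction where a whole-vector
   shift is available, nelements = 4 yields exact_log2 (4) * 2 = 4 vector
   statements for the shift/reduce ladder plus one vec_to_scalar extract
   charged to the epilogue; without the shift it would instead be
   4 + 4 - 1 = 7 vector statements.  */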
/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  unsigned inside_cost, prologue_cost;

  if (PURE_SLP_STMT (stmt_info))
    return;

  /* loop cost for vec_loop.  */
  inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                               stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
                                 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_induction_cost: inside_cost = %d, "
                     "prologue_cost = %d.\n", inside_cost, prologue_cost);
}
/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i = 0; i < n; i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */
static tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
                               tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  int nunits;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  tree *elts;
  int i;
  bool nested_in_vect_loop = false;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple *def_stmt = NULL;
  gimple_seq stmts = NULL;

  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
              || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
          == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  /* In case of a nested reduction do not use an adjustment def as
     that case is not supported by the epilogue generation correctly
     if ncopies is not one.  */
  if (adjustment_def && nested_in_vect_loop)
    {
      *adjustment_def = NULL;
      return vect_get_vec_def_for_operand (init_val, stmt);
    }

  switch (code)
    {
      case WIDEN_SUM_EXPR:
      case DOT_PROD_EXPR:
      case SAD_EXPR:
      case PLUS_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case MULT_EXPR:
      case BIT_AND_EXPR:
        {
          /* ADJUSTMENT_DEF is NULL when called from
             vect_create_epilog_for_reduction to vectorize double
             reduction.  */
          if (adjustment_def)
            *adjustment_def = init_val;

          if (code == MULT_EXPR)
            {
              real_init_val = dconst1;
              int_init_val = 1;
            }

          if (code == BIT_AND_EXPR)
            int_init_val = 1;

          if (SCALAR_FLOAT_TYPE_P (scalar_type))
            def_for_init = build_real (scalar_type, real_init_val);
          else
            def_for_init = build_int_cst (scalar_type, int_init_val);

          /* Create a vector of '0' or '1' except the first element.  */
          elts = XALLOCAVEC (tree, nunits);
          for (i = nunits - 2; i >= 0; --i)
            elts[i + 1] = def_for_init;

          /* Option1: the first element is '0' or '1' as well.  */
          if (adjustment_def)
            {
              elts[0] = def_for_init;
              init_def = build_vector (vectype, elts);
              break;
            }

          /* Option2: the first element is INIT_VAL.  */
          elts[0] = init_val;
          if (TREE_CONSTANT (init_val))
            init_def = build_vector (vectype, elts);
          else
            {
              vec<constructor_elt, va_gc> *v;
              vec_alloc (v, nunits);
              CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
              for (i = 1; i < nunits; ++i)
                CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
              init_def = build_constructor (vectype, v);
            }
        }
        break;

      case MIN_EXPR:
      case MAX_EXPR:
      case COND_EXPR:
        {
          if (adjustment_def)
            {
              *adjustment_def = NULL_TREE;
              if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo)
                  != COND_REDUCTION)
                {
                  init_def = vect_get_vec_def_for_operand (init_val, stmt);
                  break;
                }
            }
          init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
          if (! gimple_seq_empty_p (stmts))
            gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop),
                                              stmts);
          init_def = build_vector_from_val (vectype, init_val);
        }
        break;

      default:
        gcc_unreachable ();
    }

  return init_def;
}
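/* Likewise, for a product reduction 's *= a[i]' with init_val 5: when
   ADJUSTMENT_DEF is requested, the function above returns the vector
   [1,1,1,1] and sets *ADJUSTMENT_DEF to 5, so the epilog multiplies the
   reduced product by 5.  */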
/* Get at the initial defs for OP in the reduction SLP_NODE.
   NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements.  */

static void
get_initial_defs_for_reduction (slp_tree slp_node,
                                vec<tree> *vec_oprnds,
                                unsigned int number_of_vectors,
                                int reduc_index, enum tree_code code)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  int nunits;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type, scalar_type;
  tree vop;
  tree *elts;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  tree neutral_op = NULL;
  struct loop *loop;
  gimple *def_stmt;
  gimple_seq ctor_seq = NULL;

  vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
  scalar_type = TREE_TYPE (vector_type);
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
              && reduc_index != -1);

  /* op is the reduction operand of the first stmt already.  */
  /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
     we need either neutral operands or the original operands.  See
     get_initial_def_for_reduction() for details.  */
  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case SAD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
      neutral_op = build_zero_cst (scalar_type);
      break;

    case MULT_EXPR:
      neutral_op = build_one_cst (scalar_type);
      break;

    case BIT_AND_EXPR:
      neutral_op = build_all_ones_cst (scalar_type);
      break;

    /* For MIN/MAX we don't have an easy neutral operand but
       the initial values can be used fine here.  Only for
       a reduction chain we have to force a neutral element.  */
    case MAX_EXPR:
    case MIN_EXPR:
      if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
        neutral_op = NULL;
      else
        {
          tree op = get_reduction_op (stmts[0], reduc_index);
          def_stmt = SSA_NAME_DEF_STMT (op);
          loop = (gimple_bb (stmt))->loop_father;
          neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                              loop_preheader_edge (loop));
        }
      break;

    default:
      gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
      neutral_op = NULL;
    }

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = nunits * number_of_vectors / group_size;
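  /* E.g., with a group of two scalars (s1, s2), NUNITS of four and a single
     vector to create, NUMBER_OF_COPIES = 4 * 1 / 2 = 2, giving
     {s1, s2, s1, s2}.  With a group of eight and two vectors,
     NUMBER_OF_COPIES = 4 * 2 / 8 = 1, giving {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */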
  number_of_places_left_in_vector = nunits;
  elts = XALLOCAVEC (tree, nunits);
  bool constant_p = true;
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
        {
          tree op = get_reduction_op (stmt, reduc_index);
          loop = (gimple_bb (stmt))->loop_father;
          def_stmt = SSA_NAME_DEF_STMT (op);

          /* Get the def before the loop.  In reduction chain we have only
             one initial value.  */
          if ((j != (number_of_copies - 1)
               || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
                   && i != 0))
              && neutral_op)
            op = neutral_op;
          else
            op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                        loop_preheader_edge (loop));

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          number_of_places_left_in_vector--;
          elts[number_of_places_left_in_vector] = op;
          if (!CONSTANT_CLASS_P (op))
            constant_p = false;

          if (number_of_places_left_in_vector == 0)
            {
              tree vec_cst;
              if (constant_p)
                vec_cst = build_vector (vector_type, elts);
              else
                {
                  vec<constructor_elt, va_gc> *v;
                  unsigned k;
                  vec_alloc (v, nunits);
                  for (k = 0; k < nunits; ++k)
                    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
                  vec_cst = build_constructor (vector_type, v);
                }
              tree init;
              gimple_stmt_iterator gsi;
              init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
              if (ctor_seq != NULL)
                {
                  gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
                  gsi_insert_seq_before_without_update (&gsi, ctor_seq,
                                                        GSI_SAME_STMT);
                  ctor_seq = NULL;
                }
              voprnds.quick_push (init);

              number_of_places_left_in_vector = nunits;
              constant_p = true;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  while (number_of_vectors > vec_oprnds->length ())
    {
      tree neutral_vec = NULL;

      if (neutral_op)
        {
          if (!neutral_vec)
            neutral_vec = build_vector_from_val (vector_type, neutral_op);

          vec_oprnds->quick_push (neutral_vec);
        }
      else
        {
          for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
            vec_oprnds->quick_push (vop);
        }
    }
}
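/* E.g., if four vectors are required for a PLUS reduction but only two
   were created above, two copies of the neutral vector {0, 0, 0, 0} are
   appended; for MIN/MAX, which have no neutral element here, the vectors
   already created are repeated instead.  */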
/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
     reduction statements.
   STMT is the scalar reduction stmt that is being vectorized.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.
   REDUC_CODE is the tree-code for the epilog reduction.
   REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
     computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
   SLP_NODE is an SLP node containing a group of reduction statements.  The
     first one in this group is STMT.

   This function:
   1. Creates the reduction def-use cycles: sets the arguments for
      REDUCTION_PHIS:
      The loop-entry argument is the vectorized initial-value of the reduction.
      The loop-latch argument is taken from VECT_DEFS - the vector of partial
      sums.
   2. "Reduces" each vector of partial results VECT_DEFS into a single result,
      by applying the operation specified by REDUC_CODE if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

     The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

     The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>
*/
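/* Viewed as scalar code, for a 4-lane add reduction step 2 of this function
   computes

       s = v[0] + v[1] + v[2] + v[3];

   from the final vector V of partial sums, either with a single REDUC_CODE
   statement, with two whole-vector shift/add rounds, or with three scalar
   additions, followed by the optional adjustment of step 3.  */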
static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
                                  gimple *reduc_def_stmt,
                                  int ncopies, enum tree_code reduc_code,
                                  vec<gimple *> reduction_phis,
                                  int reduc_index, bool double_reduc,
                                  slp_tree slp_node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple *new_phi = NULL, *phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
  gimple *epilog_stmt = NULL;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  gimple *exit_phi;
  tree bitsize;
  tree adjustment_def = NULL;
  tree vec_initial_def = NULL;
  tree expr, def, initial_def = NULL;
  tree orig_name, scalar_result;
  imm_use_iterator imm_iter, phi_imm_iter;
  use_operand_p use_p, phi_use_p;
  gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
  bool nested_in_vect_loop = false;
  auto_vec<gimple *> new_phis;
  auto_vec<gimple *> inner_phis;
  enum vect_def_type dt = vect_unknown_def_type;
  int j, i;
  auto_vec<tree> scalar_results;
  unsigned int group_size = 1, k, ratio;
  auto_vec<tree> vec_initial_defs;
  auto_vec<gimple *> phis;
  bool slp_reduc = false;
  tree new_phi_result;
  gimple *inner_phi = NULL;
  tree induction_index = NULL_TREE;

  if (slp_node)
    group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_in_vect_loop = true;
      gcc_assert (!slp_node);
    }

  vectype = STMT_VINFO_VECTYPE (stmt_info);
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);
  /* 1. Create the reduction def-use cycle:
     Set the arguments of REDUCTION_PHIS, i.e., transform

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          ...

     (in case of SLP, do it for all the phis).  */

  /* Get the loop-entry arguments.  */
  enum vect_def_type initial_def_dt = vect_unknown_def_type;
  if (slp_node)
    {
      unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      vec_initial_defs.reserve (vec_num);
      get_initial_defs_for_reduction (slp_node, &vec_initial_defs,
                                      vec_num, reduc_index, code);
    }
  else
    {
      /* Get at the scalar def before the loop, that defines the initial value
         of the reduction variable.  */
      gimple *def_stmt;
      initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
                                           loop_preheader_edge (loop));
      vect_is_simple_use (initial_def, loop_vinfo, &def_stmt,
                          &initial_def_dt);
      vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
                                                       &adjustment_def);
      vec_initial_defs.create (1);
      vec_initial_defs.quick_push (vec_initial_def);
    }

  /* Set phi nodes arguments.  */
  FOR_EACH_VEC_ELT (reduction_phis, i, phi)
    {
      tree vec_init_def, def;
      gimple_seq stmts;
      vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts,
                                           true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      def = vect_defs[i];
      for (j = 0; j < ncopies; j++)
        {
          if (j != 0)
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              if (nested_in_vect_loop)
                vec_init_def
                  = vect_get_vec_def_for_stmt_copy (initial_def_dt,
                                                    vec_init_def);
            }

          /* Set the loop-entry arg of the reduction-phi.  */

          if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
              == INTEGER_INDUC_COND_REDUCTION)
            {
              /* Initialise the reduction phi to zero.  This prevents initial
                 values of non-zero interfering with the reduction op.  */
              gcc_assert (ncopies == 1);
              gcc_assert (i == 0);

              tree vec_init_def_type = TREE_TYPE (vec_init_def);
              tree zero_vec = build_zero_cst (vec_init_def_type);

              add_phi_arg (as_a <gphi *> (phi), zero_vec,
                           loop_preheader_edge (loop), UNKNOWN_LOCATION);
            }
          else
            add_phi_arg (as_a <gphi *> (phi), vec_init_def,
                         loop_preheader_edge (loop), UNKNOWN_LOCATION);

          /* Set the loop-latch arg for the reduction-phi.  */
          if (j > 0)
            def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);

          add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
                       UNKNOWN_LOCATION);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "transform reduction: created def-use cycle: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
            }
        }
    }
  /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
     which is updated with the current index of the loop for every match of
     the original loop's cond_expr (VEC_STMT).  This results in a vector
     containing the last time the condition passed for that vector lane.
     The first match will be a 1 to allow 0 to be used for non-matching
     indexes.  If there are no matches at all then the vector will be all
     zeroes.  */
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    {
      tree indx_before_incr, indx_after_incr;
      int nunits_out = TYPE_VECTOR_SUBPARTS (vectype);

      gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);

      int scalar_precision
        = GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (vectype)));
      tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
      tree cr_index_vector_type = build_vector_type
        (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we create a simple vector induction variable which starts
         with the values {1,2,3,...} (SERIES_VECT) and increments by the
         vector size (STEP).  */

      /* Create a {1,2,3,...} vector.  */
      tree *vtemp = XALLOCAVEC (tree, nunits_out);
      for (k = 0; k < (unsigned) nunits_out; ++k)
        vtemp[k] = build_int_cst (cr_index_scalar_type, k + 1);
      tree series_vect = build_vector (cr_index_vector_type, vtemp);

      /* Create a vector of the step value.  */
      tree step = build_int_cst (cr_index_scalar_type, nunits_out);
      tree vec_step = build_vector_from_val (cr_index_vector_type, step);

      /* Create an induction variable.  */
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      standard_iv_increment_position (loop, &incr_gsi, &insert_after);
      create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
                 insert_after, &indx_before_incr, &indx_after_incr);

      /* Next create a new phi node vector (NEW_PHI_TREE) which starts
         filled with zeros (VEC_ZERO).  */

      /* Create a vector of 0s.  */
      tree zero = build_zero_cst (cr_index_scalar_type);
      tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);

      /* Create a vector phi node.  */
      tree new_phi_tree = make_ssa_name (cr_index_vector_type);
      new_phi = create_phi_node (new_phi_tree, loop->header);
      set_vinfo_for_stmt (new_phi,
                          new_stmt_vec_info (new_phi, loop_vinfo));
      add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
                   loop_preheader_edge (loop), UNKNOWN_LOCATION);

      /* Now take the condition from the loops original cond_expr
         (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for
         every match uses values from the induction variable
         (INDEX_BEFORE_INCR) otherwise uses values from the phi node
         (NEW_PHI_TREE).
         Finally, we update the phi (NEW_PHI_TREE) to take the value of
         the new cond_expr (INDEX_COND_EXPR).  */

      /* Duplicate the condition from vec_stmt.  */
      tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));

      /* Create a conditional, where the condition is taken from vec_stmt
         (CCOMPARE), then is the induction index (INDEX_BEFORE_INCR) and
         else is the phi (NEW_PHI_TREE).  */
      tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
                                     ccompare, indx_before_incr,
                                     new_phi_tree);
      induction_index = make_ssa_name (cr_index_vector_type);
      gimple *index_condition = gimple_build_assign (induction_index,
                                                     index_cond_expr);
      gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
      stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
                                                        loop_vinfo);
      STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
      set_vinfo_for_stmt (index_condition, index_vec_info);

      /* Update the phi with the vec cond.  */
      add_phi_arg (as_a <gphi *> (new_phi), induction_index,
                   loop_latch_edge (loop), UNKNOWN_LOCATION);
    }
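  /* E.g., with four lanes and two vector iterations, INDX_BEFORE_INCR takes
     the values {1,2,3,4} and then {5,6,7,8}.  If the condition matches in
     lanes 1 and 3 on the first iteration and in lane 2 on the second, the
     index vector evolves {0,0,0,0} -> {0,2,0,4} -> {0,2,7,4}: each lane
     records the last iteration index at which its condition held.  */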
  /* 2. Create epilog code.
        The reduction epilog code operates across the elements of the vector
        of partial results computed by the vectorized loop.
        The reduction epilog code consists of:

        step 1: compute the scalar result in a vector (v_out2)
        step 2: extract the scalar result (s_out3) from the vector (v_out2)
        step 3: adjust the scalar result (s_out3) if needed.

        Step 1 can be accomplished using one of the following three schemes:
          (scheme 1) using reduc_code, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop. In this case steps 1+2 above are
                     combined.

        The overall epilog code looks like this:

        s_out0 = phi <s_loop>                 # original EXIT_PHI
        v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
        v_out2 = reduce <v_out1>              # step 1
        s_out3 = extract_field <v_out2, 0>    # step 2
        s_out4 = adjust_result <s_out3>       # step 3

        (step 3 is optional, and steps 1 and 2 may be combined).
        Lastly, the uses of s_out0 are replaced by s_out4.  */
  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
         v_out1 = phi <VECT_DEF>
         Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis.create (vect_defs.length ());
  FOR_EACH_VEC_ELT (vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
        {
          tree new_def = copy_ssa_name (def);
          phi = create_phi_node (new_def, exit_bb);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
          if (j == 0)
            new_phis.quick_push (phi);
          else
            {
              def = vect_get_vec_def_for_stmt_copy (dt, def);
              STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
            }

          SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
          prev_phi_info = vinfo_for_stmt (phi);
        }
    }
  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  Create exit phis for the outer loop.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
      inner_phis.create (vect_defs.length ());
      FOR_EACH_VEC_ELT (new_phis, i, phi)
        {
          tree new_result = copy_ssa_name (PHI_RESULT (phi));
          gphi *outer_phi = create_phi_node (new_result, exit_bb);
          SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                           PHI_RESULT (phi));
          set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                            loop_vinfo));
          inner_phis.quick_push (phi);
          new_phis[i] = outer_phi;
          prev_phi_info = vinfo_for_stmt (outer_phi);
          while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              new_result = copy_ssa_name (PHI_RESULT (phi));
              outer_phi = create_phi_node (new_result, exit_bb);
              SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                               PHI_RESULT (phi));
              set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                                loop_vinfo));
              STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
              prev_phi_info = vinfo_for_stmt (outer_phi);
            }
        }
    }
  exit_gsi = gsi_after_labels (exit_bb);

  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_code is not available) and in the final adjustment
         code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results.create (group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be used
     in the vectorized outer-loop, or reduced to a scalar result at the end of
     the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;
  /* SLP reduction without reduction chain, e.g.,
     # a1 = phi <a2, a0>
     # b1 = phi <b2, b0>
     a2 = operation (a1)
     b2 = operation (b1)  */
  slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

  /* In case of reduction chain, e.g.,
     # a1 = phi <a3, a0>
     a2 = operation (a1)
     a3 = operation (a2),

     we may end up with more than one vector result.  Here we reduce them to
     one vector.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      tree first_vect = PHI_RESULT (new_phis[0]);
      tree tmp;
      gassign *new_vec_stmt = NULL;

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      for (k = 1; k < new_phis.length (); k++)
        {
          gimple *next_phi = new_phis[k];
          tree second_vect = PHI_RESULT (next_phi);

          tmp = build2 (code, vectype, first_vect, second_vect);
          new_vec_stmt = gimple_build_assign (vec_dest, tmp);
          first_vect = make_ssa_name (vec_dest, new_vec_stmt);
          gimple_assign_set_lhs (new_vec_stmt, first_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
        }

      new_phi_result = first_vect;
      if (new_vec_stmt)
        {
          new_phis.truncate (0);
          new_phis.safe_push (new_vec_stmt);
        }
    }
  else
    new_phi_result = PHI_RESULT (new_phis[0]);
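  /* E.g., for a reduction chain vectorized with two partial result vectors
     v0 and v1, the loop above emits a single vector statement
     first_vect = v0 + v1 (for a PLUS chain), so that only one vector feeds
     the scalar reduction epilog below.  */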
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
      && reduc_code != ERROR_MARK)
    {
      /* For condition reductions, we have a vector (NEW_PHI_RESULT)
         containing various data values where the condition matched and
         another vector (INDUCTION_INDEX) containing all the indexes of those
         matches.  We need to extract the last matching index (which will be
         the index with highest value) and use this to index into the data
         vector.
         For the case where there were no matches, the data vector will
         contain all default values and the index vector will be all
         zeros.  */

      /* Get various versions of the type of the vector of indexes.  */
      tree index_vec_type = TREE_TYPE (induction_index);
      gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
      tree index_scalar_type = TREE_TYPE (index_vec_type);
      tree index_vec_cmp_type = build_same_sized_truth_vector_type
        (index_vec_type);

      /* Get an unsigned integer version of the type of the data vector.  */
      int scalar_precision = GET_MODE_PRECISION (TYPE_MODE (scalar_type));
      tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
      tree vectype_unsigned = build_vector_type
        (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));

      /* First we need to create a vector (ZERO_VEC) of zeros and another
         vector (MAX_INDEX_VEC) filled with the last matching index, which we
         can create using a MAX reduction and then expanding.
         In the case where the loop never made any matches, the max index will
         be zero.  */

      /* Vector of {0, 0, 0,...}.  */
      tree zero_vec = make_ssa_name (vectype);
      tree zero_vec_rhs = build_zero_cst (vectype);
      gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
      gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);

      /* Find maximum value from the vector of found indexes.  */
      tree max_index = make_ssa_name (index_scalar_type);
      gimple *max_index_stmt = gimple_build_assign (max_index, REDUC_MAX_EXPR,
                                                    induction_index);
      gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);

      /* Vector of {max_index, max_index, max_index,...}.  */
      tree max_index_vec = make_ssa_name (index_vec_type);
      tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
                                                      max_index);
      gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
                                                        max_index_vec_rhs);
      gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);

      /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
         with the vector (INDUCTION_INDEX) of found indexes, choosing values
         from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
         otherwise.  Only one value should match, resulting in a vector
         (VEC_COND) with one data value and the rest zeros.
         In the case where the loop never made any matches, every index will
         match, resulting in a vector with all data values (which will all be
         the default value).  */

      /* Compare the max index vector to the vector of found indexes to find
         the position of the max value.  */
      tree vec_compare = make_ssa_name (index_vec_cmp_type);
      gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
                                                      induction_index,
                                                      max_index_vec);
      gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);

      /* Use the compare to choose either values from the data vector or
         zero.  */
      tree vec_cond = make_ssa_name (vectype);
      gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
                                                   vec_compare, new_phi_result,
                                                   zero_vec);
      gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);

      /* Finally we need to extract the data value from the vector (VEC_COND)
         into a scalar (MATCHED_DATA_REDUC).  Logically we want to do a OR
         reduction, but because this doesn't exist, we can use a MAX reduction
         instead.  The data value might be signed or a float so we need to
         cast it first.
         In the case where the loop never made any matches, the data values
         are all identical, and so will reduce down correctly.  */

      /* Make the matched data values unsigned.  */
      tree vec_cond_cast = make_ssa_name (vectype_unsigned);
      tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
                                       vec_cond);
      gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
                                                        vec_cond_cast_rhs);
      gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);

      /* Reduce down to a scalar value.  */
      tree data_reduc = make_ssa_name (scalar_type_unsigned);
      optab ot = optab_for_tree_code (REDUC_MAX_EXPR, vectype_unsigned,
                                      optab_default);
      gcc_assert (optab_handler (ot, TYPE_MODE (vectype_unsigned))
                  != CODE_FOR_nothing);
      gimple *data_reduc_stmt = gimple_build_assign (data_reduc,
                                                     REDUC_MAX_EXPR,
                                                     vec_cond_cast);
      gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);

      /* Convert the reduced value back to the result type and set as the
         result.  */
      gimple_seq stmts = NULL;
      new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
                               data_reduc);
      gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
      scalar_results.safe_push (new_temp);
    }
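  /* E.g., with INDUCTION_INDEX = {0,2,7,4} and NEW_PHI_RESULT =
     {d0,d1,d2,d3}: MAX_INDEX is 7, MAX_INDEX_VEC is {7,7,7,7},
     VEC_COMPARE selects only lane 2, VEC_COND is {0,0,d2,0}, and the final
     unsigned MAX reduction extracts d2, the data value of the last
     match.  */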
  else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
           && reduc_code == ERROR_MARK)
    {
      /* Condition reduction without supported REDUC_MAX_EXPR.  Generate
         idx_val = induction_index[0];
         val = data_reduc[0];
         for (idx = 0, val = init, i = 0; i < nelts; ++i)
           if (induction_index[i] > idx_val)
             val = data_reduc[i], idx_val = induction_index[i];
         return val;  */

      tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
      tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
      unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
      unsigned HOST_WIDE_INT v_size
        = el_size * TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
      tree idx_val = NULL_TREE, val = NULL_TREE;
      for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
        {
          tree old_idx_val = idx_val;
          tree old_val = val;
          idx_val = make_ssa_name (idx_eltype);
          epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
                                             build3 (BIT_FIELD_REF, idx_eltype,
                                                     induction_index,
                                                     bitsize_int (el_size),
                                                     bitsize_int (off)));
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          val = make_ssa_name (data_eltype);
          epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
                                             build3 (BIT_FIELD_REF,
                                                     data_eltype,
                                                     new_phi_result,
                                                     bitsize_int (el_size),
                                                     bitsize_int (off)));
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          if (off != 0)
            {
              tree new_idx_val = idx_val;
              tree new_val = val;
              if (off != v_size - el_size)
                {
                  new_idx_val = make_ssa_name (idx_eltype);
                  epilog_stmt = gimple_build_assign (new_idx_val,
                                                     MAX_EXPR, idx_val,
                                                     old_idx_val);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
                }
              new_val = make_ssa_name (data_eltype);
              epilog_stmt = gimple_build_assign (new_val,
                                                 COND_EXPR,
                                                 build2 (GT_EXPR,
                                                         boolean_type_node,
                                                         idx_val,
                                                         old_idx_val),
                                                 val, old_val);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
              idx_val = new_idx_val;
              val = new_val;
            }
        }

      /* Convert the reduced value back to the result type and set as the
         result.  */
      gimple_seq stmts = NULL;
      val = gimple_convert (&stmts, scalar_type, val);
      gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
      scalar_results.safe_push (val);
    }
  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  In SLP we simply need to extract all the elements from the
         vector (without reducing them), so we use scalar shifts.  */
  else if (reduc_code != ERROR_MARK && !slp_reduc)
    {
      tree tmp;
      tree vec_elem_type;

      /* Case 1:  Create:
         v_out2 = reduc_expr <v_out1>  */

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Reduce using direct vector reduction.\n");

      vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
      if (!useless_type_conversion_p (scalar_type, vec_elem_type))
        {
          tree tmp_dest =
              vect_create_destination_var (scalar_dest, vec_elem_type);
          tmp = build1 (reduc_code, vec_elem_type, new_phi_result);
          epilog_stmt = gimple_build_assign (tmp_dest, tmp);
          new_temp = make_ssa_name (tmp_dest, epilog_stmt);
          gimple_assign_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

          tmp = build1 (NOP_EXPR, scalar_type, new_temp);
        }
      else
        tmp = build1 (reduc_code, scalar_type, new_phi_result);

      epilog_stmt = gimple_build_assign (new_scalar_dest, tmp);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
          == INTEGER_INDUC_COND_REDUCTION)
        {
          /* Earlier we set the initial value to be zero.  Check the result
             and if it is zero then replace with the original initial
             value.  */
          tree zero = build_zero_cst (scalar_type);
          tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, zero);

          tmp = make_ssa_name (new_scalar_dest);
          epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
                                             initial_def, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          new_temp = tmp;
        }

      scalar_results.safe_push (new_temp);
    }
  else
    {
      bool reduce_with_shift = have_whole_vector_shift (mode);
      int element_bitsize = tree_to_uhwi (bitsize);
      int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
      tree vec_temp;

      /* COND reductions all do the final reduction with MAX_EXPR.  */
      if (code == COND_EXPR)
        code = MAX_EXPR;

      /* Regardless of whether we have a whole vector shift, if we're
         emulating the operation via tree-vect-generic, we don't want
         to use it.  Only the first round of the reduction is likely
         to still be profitable via emulation.  */
      /* ??? It might be better to emit a reduction tree code here, so that
         tree-vect-generic can expand the first round via bit tricks.  */
      if (!VECTOR_MODE_P (mode))
        reduce_with_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype, optab_default);
          if (optab_handler (optab, mode) == CODE_FOR_nothing)
            reduce_with_shift = false;
        }

      if (reduce_with_shift && !slp_reduc)
        {
          int nelements = vec_size_in_bits / element_bitsize;
          unsigned char *sel = XALLOCAVEC (unsigned char, nelements);

          int elt_offset;

          tree zero_vec = build_zero_cst (vectype);
          /* Case 2: Create:
             for (offset = nelements/2; offset >= 1; offset/=2)
                {
                  Create:  va' = vec_shift <va, offset>
                  Create:  va = vop <va, va'>
                }  */

          tree rhs;

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using vector shifts\n");

          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_temp = new_phi_result;
          for (elt_offset = nelements / 2;
               elt_offset >= 1;
               elt_offset /= 2)
            {
              calc_vec_perm_mask_for_shift (mode, elt_offset, sel);
              tree mask = vect_gen_perm_mask_any (vectype, sel);
              epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
                                                 new_temp, zero_vec, mask);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
                                                 new_temp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          /* 2.4  Extract the final scalar result.  Create:
             s_out3 = extract_field <v_out2, bitpos>  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "extract scalar result\n");

          rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
                        bitsize, bitsize_zero_node);
          epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
          new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
          gimple_assign_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          scalar_results.safe_push (new_temp);
        }
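      /* E.g., reducing {a,b,c,d} with addition: the first round shifts by
         two elements and adds, {a,b,c,d} + {c,d,0,0} = {a+c,b+d,c,d}; the
         second shifts by one and adds, leaving a+b+c+d in element 0, which
         the BIT_FIELD_REF above then extracts.  */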
      else
        {
          /* Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size;)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>  // For non SLP cases
               }  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using scalar code.\n");

          vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          FOR_EACH_VEC_ELT (new_phis, i, new_phi)
            {
              int bit_offset;
              if (gimple_code (new_phi) == GIMPLE_PHI)
                vec_temp = PHI_RESULT (new_phi);
              else
                vec_temp = gimple_assign_lhs (new_phi);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                                 bitsize_zero_node);
              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              /* In SLP we don't need to apply reduction operation, so we just
                 collect s' values in SCALAR_RESULTS.  */
              if (slp_reduc)
                scalar_results.safe_push (new_temp);

              for (bit_offset = element_bitsize;
                   bit_offset < vec_size_in_bits;
                   bit_offset += element_bitsize)
                {
                  tree bitpos = bitsize_int (bit_offset);
                  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                     bitsize, bitpos);

                  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
                  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
                  gimple_assign_set_lhs (epilog_stmt, new_name);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

                  if (slp_reduc)
                    {
                      /* In SLP we don't need to apply reduction operation, so
                         we just collect s' values in SCALAR_RESULTS.  */
                      new_temp = new_name;
                      scalar_results.safe_push (new_name);
                    }
                  else
                    {
                      epilog_stmt = gimple_build_assign (new_scalar_dest, code,
                                                         new_name, new_temp);
                      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
                      gimple_assign_set_lhs (epilog_stmt, new_temp);
                      gsi_insert_before (&exit_gsi, epilog_stmt,
                                         GSI_SAME_STMT);
                    }
                }
            }

          /* The only case where we need to reduce scalar results in SLP, is
             unrolling.  If the size of SCALAR_RESULTS is greater than
             GROUP_SIZE, we reduce them combining elements modulo
             GROUP_SIZE.  */
          if (slp_reduc)
            {
              tree res, first_res, new_res;
              gimple *new_stmt;

              /* Reduce multiple scalar results in case of SLP unrolling.  */
              for (j = group_size; scalar_results.iterate (j, &res);
                   j++)
                {
                  first_res = scalar_results[j % group_size];
                  new_stmt = gimple_build_assign (new_scalar_dest, code,
                                                  first_res, res);
                  new_res = make_ssa_name (new_scalar_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_res);
                  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
                  scalar_results[j % group_size] = new_res;
                }
            }
          else
            /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
            scalar_results.safe_push (new_temp);
        }
      if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
          == INTEGER_INDUC_COND_REDUCTION)
        {
          /* Earlier we set the initial value to be zero.  Check the result
             and if it is zero then replace with the original initial
             value.  */
          tree zero = build_zero_cst (scalar_type);
          tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, zero);

          tree tmp = make_ssa_name (new_scalar_dest);
          epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
                                             initial_def, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
          scalar_results[0] = tmp;
        }
    }

vect_finalize_reduction:
  if (double_reduc)
    loop = loop->inner;

  /* 2.5 Adjust the final result by the initial value of the reduction
         variable. (When such adjustment is not needed, then
         'adjustment_def' is zero).  For example, if code is PLUS we create:
         new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      gcc_assert (!slp_reduc);
      if (nested_in_vect_loop)
        {
          new_phi = new_phis[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          new_temp = scalar_results[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
        {
          set_vinfo_for_stmt (epilog_stmt,
                              new_stmt_vec_info (epilog_stmt, loop_vinfo));
          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));

          if (!double_reduc)
            scalar_results.quick_push (new_temp);
          else
            scalar_results[0] = new_temp;
        }
      else
        scalar_results[0] = new_temp;

      new_phis[0] = epilog_stmt;
    }
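  /* E.g., for the sum reduction with init_val 10 initialized from [0,0,0,0]
     (Option1 in get_initial_def_for_reduction), the code above emits
     s_out4 = s_out3 + 10, folding the initial value back into the final
     scalar result.  */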
  /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
          phis with new adjusted scalar results, i.e., replace use <s_out0>
          with use <s_out4>.

     Transform:
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out0>
          use <s_out0>

     into:

        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */


  /* In SLP reduction chain we reduce vector results into one vector if
     necessary, hence we set here GROUP_SIZE to 1.  SCALAR_DEST is the LHS of
     the last stmt in the reduction chain, since we are looking for the loop
     exit phi node.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
      /* Handle reduction patterns.  */
      if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
        dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));

      scalar_dest = gimple_assign_lhs (dest_stmt);
      group_size = 1;
    }
  /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
     case that GROUP_SIZE is greater than vectorization factor).  Therefore,
     we need to match SCALAR_RESULTS with corresponding statements.  The first
     (GROUP_SIZE / number of new vector stmts) scalar results correspond to
     the first vector stmt, etc.
     (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
  if (group_size > new_phis.length ())
    {
      ratio = group_size / new_phis.length ();
      gcc_assert (!(group_size % new_phis.length ()));
    }
  else
    ratio = 1;

  for (k = 0; k < group_size; k++)
    {
      if (k % ratio == 0)
        {
          epilog_stmt = new_phis[k / ratio];
          reduction_phi = reduction_phis[k / ratio];
          if (double_reduc)
            inner_phi = inner_phis[k / ratio];
        }

      if (slp_reduc)
        {
          gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];

          orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
          /* SLP statements can't participate in patterns.  */
          gcc_assert (!orig_stmt);
          scalar_dest = gimple_assign_lhs (current_stmt);
        }

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate
         uses - one at the latch block, and one at the loop exit).  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
            && !is_gimple_debug (USE_STMT (use_p)))
          phis.safe_push (USE_STMT (use_p));

      /* While we expect to have found an exit_phi because of loop-closed-ssa
         form we can end up without one if the scalar cycle is dead.  */

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          if (outer_loop)
            {
              stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
              gphi *vect_phi;

              /* FORNOW. Currently not supporting the case that an inner-loop
                 reduction is not used in the outer-loop (but only outside the
                 outer-loop), unless it is double reduction.  */
              gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                           && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
                          || double_reduc);

              if (double_reduc)
                STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
              else
                STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
              if (!double_reduc
                  || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
                      != vect_double_reduction_def)
                continue;

              /* Handle double reduction:

                 stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
                 stmt2:   s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
                 stmt3:   s4 = use (s3)     - (regular) reduc stmt (inner loop)
                 stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

                 At that point the regular reduction (stmt2 and stmt3) is
                 already vectorized, as well as the exit phi node, stmt4.
                 Here we vectorize the phi node of double reduction, stmt1, and
                 update all relevant statements.  */

              /* Go through all the uses of s2 to find double reduction phi
                 node, i.e., stmt1 above.  */
              orig_name = PHI_RESULT (exit_phi);
              FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
                {
                  stmt_vec_info use_stmt_vinfo;
                  stmt_vec_info new_phi_vinfo;
                  tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
                  basic_block bb = gimple_bb (use_stmt);
                  gimple *use;

                  /* Check that USE_STMT is really double reduction phi
                     node.  */
                  if (gimple_code (use_stmt) != GIMPLE_PHI
                      || gimple_phi_num_args (use_stmt) != 2
                      || bb->loop_father != outer_loop)
                    continue;
                  use_stmt_vinfo = vinfo_for_stmt (use_stmt);
                  if (!use_stmt_vinfo
                      || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
                          != vect_double_reduction_def)
                    continue;

                  /* Create vector phi node for double reduction:
                     vs1 = phi <vs0, vs2>
                     vs1 was created previously in this function by a call to
                       vect_get_vec_def_for_operand and is stored in
                       vec_initial_def;
                     vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
                     vs0 is created here.  */

                  /* Create vector phi node.  */
                  vect_phi = create_phi_node (vec_initial_def, bb);
                  new_phi_vinfo = new_stmt_vec_info (vect_phi,
                                    loop_vec_info_for_loop (outer_loop));
                  set_vinfo_for_stmt (vect_phi, new_phi_vinfo);

                  /* Create vs0 - initial def of the double reduction phi.  */
                  preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
                                             loop_preheader_edge (outer_loop));
                  init_def = get_initial_def_for_reduction (stmt,
                                                          preheader_arg, NULL);
                  vect_phi_init = vect_init_vector (use_stmt, init_def,
                                                    vectype, NULL);

                  /* Update phi node arguments with vs0 and vs2.  */
                  add_phi_arg (vect_phi, vect_phi_init,
                               loop_preheader_edge (outer_loop),
                               UNKNOWN_LOCATION);
                  add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
                               loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "created double reduction phi node: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
                    }

                  vect_phi_res = PHI_RESULT (vect_phi);

                  /* Replace the use, i.e., set the correct vs1 in the regular
                     reduction phi node.  FORNOW, NCOPIES is always 1, so the
                     loop is redundant.  */
                  use = reduction_phi;
                  for (j = 0; j < ncopies; j++)
                    {
                      edge pr_edge = loop_preheader_edge (loop);
                      SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
                      use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
                    }
                }
            }
        }

      phis.release ();
      if (nested_in_vect_loop)
        {
          if (double_reduc)
            loop = outer_loop;
          else
            continue;
        }

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate
         uses, one at the latch block, and one at the loop exit).  For double
         reductions we are looking for exit phis of the outer loop.  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
            {
              if (!is_gimple_debug (USE_STMT (use_p)))
                phis.safe_push (USE_STMT (use_p));
            }
          else
            {
              if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
                {
                  tree phi_res = PHI_RESULT (USE_STMT (use_p));

                  FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
                    {
                      if (!flow_bb_inside_loop_p (loop,
                                             gimple_bb (USE_STMT (phi_use_p)))
                          && !is_gimple_debug (USE_STMT (phi_use_p)))
                        phis.safe_push (USE_STMT (phi_use_p));
                    }
                }
            }
        }

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          /* Replace the uses:  */
          orig_name = PHI_RESULT (exit_phi);
          scalar_result = scalar_results[k];
          FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, scalar_result);
        }

      phis.release ();
    }
}
/* Function is_nonwrapping_integer_induction.

   Check that STMT (a phi node that is part of loop LOOP) is an integer
   induction that both increments and cannot wrap.  */

static bool
is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
  tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
  tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
  widest_int ni, max_loop_value, lhs_max;
  bool overflow = false;

  /* Make sure the loop is integer based.  */
  if (TREE_CODE (base) != INTEGER_CST
      || TREE_CODE (step) != INTEGER_CST)
    return false;

  /* Check that the induction increments.  */
  if (tree_int_cst_sgn (step) == -1)
    return false;

  /* Check that the max size of the loop will not wrap.  */

  if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
    return true;

  if (! max_stmt_executions (loop, &ni))
    return false;

  max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
                            &overflow);
  if (overflow)
    return false;

  max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
                            TYPE_SIGN (lhs_type), &overflow);
  if (overflow)
    return false;

  return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
          <= TYPE_PRECISION (lhs_type));
}
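/* E.g., for an unsigned char induction with base 0 and step 1 in a loop
   executing at most 300 times, the maximum value 300 needs 9 bits of
   precision, which exceeds the 8-bit type, so the function returns false
   (the induction could wrap); with at most 200 iterations (8 bits needed)
   it returns true.  */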
/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may be
   of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   This function also handles reduction of condition expressions, for example:
     for (int i = 0; i < N; i++)
       if (a[i] < value)
         last = i;
   This is handled by vectorising the loop and creating an additional vector
   containing the loop indexes for which "a[i] < value" was true.  In the
   function epilogue this is reduced to a single max value and then used to
   index into the vector of results.

   In some cases of reduction patterns, the type of the reduction variable X
   is different from the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a
   vector stmt is different from the vectype that is used to determine the
   vectorization factor, because it consists of a different number of elements
   than the actual number of elements that are being operated upon in
   parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the
   result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example), so
   that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should *NOT*
   be used to create the vectorized stmt.  The right vectype for the vectorized
   stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
5553 vectorizable_reduction (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
5554 gimple
**vec_stmt
, slp_tree slp_node
)
5558 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5559 tree vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
5560 tree vectype_in
= NULL_TREE
;
5561 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5562 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5563 enum tree_code code
, orig_code
, epilog_reduc_code
;
5564 machine_mode vec_mode
;
5566 optab optab
, reduc_optab
;
5567 tree new_temp
= NULL_TREE
;
5569 enum vect_def_type dt
, cond_reduc_dt
= vect_unknown_def_type
;
5570 gphi
*new_phi
= NULL
;
5574 stmt_vec_info orig_stmt_info
= NULL
;
5578 stmt_vec_info prev_stmt_info
, prev_phi_info
;
5579 bool single_defuse_cycle
= false;
5580 gimple
*new_stmt
= NULL
;
5583 enum vect_def_type dts
[3];
5584 bool nested_cycle
= false, found_nested_cycle_def
= false;
5585 bool double_reduc
= false;
5587 struct loop
* def_stmt_loop
, *outer_loop
= NULL
;
5589 gimple
*def_arg_stmt
;
5590 auto_vec
<tree
> vec_oprnds0
;
5591 auto_vec
<tree
> vec_oprnds1
;
5592 auto_vec
<tree
> vec_oprnds2
;
5593 auto_vec
<tree
> vect_defs
;
5594 auto_vec
<gimple
*> phis
;
5597 bool first_p
= true;
5598 tree cr_index_scalar_type
= NULL_TREE
, cr_index_vector_type
= NULL_TREE
;
5599 tree cond_reduc_val
= NULL_TREE
;
5601 /* Make sure it was already recognized as a reduction computation. */
5602 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt
)) != vect_reduction_def
5603 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt
)) != vect_nested_cycle
)
5606 if (nested_in_vect_loop_p (loop
, stmt
))
5610 nested_cycle
= true;
5613 /* In case of reduction chain we switch to the first stmt in the chain, but
5614 we don't update STMT_INFO, since only the last stmt is marked as reduction
5615 and has reduction properties. */
5616 if (GROUP_FIRST_ELEMENT (stmt_info
)
5617 && GROUP_FIRST_ELEMENT (stmt_info
) != stmt
)
5619 stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
5623 if (gimple_code (stmt
) == GIMPLE_PHI
)
5625 /* Analysis is fully done on the reduction stmt invocation. */
5628 STMT_VINFO_TYPE (stmt_info
) = reduc_vec_info_type
;
5632 gimple
*reduc_stmt
= STMT_VINFO_REDUC_DEF (stmt_info
);
5633 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt
)))
5634 reduc_stmt
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt
));
5636 gcc_assert (is_gimple_assign (reduc_stmt
));
5637 for (unsigned k
= 1; k
< gimple_num_ops (reduc_stmt
); ++k
)
5639 tree op
= gimple_op (reduc_stmt
, k
);
5640 if (op
== gimple_phi_result (stmt
))
5643 && gimple_assign_rhs_code (reduc_stmt
) == COND_EXPR
)
5645 vectype_in
= get_vectype_for_scalar_type (TREE_TYPE (op
));
5648 gcc_assert (vectype_in
);
5653 ncopies
= (LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
5654 / TYPE_VECTOR_SUBPARTS (vectype_in
));
5656 use_operand_p use_p
;
5659 && (STMT_VINFO_RELEVANT (vinfo_for_stmt (reduc_stmt
))
5660 <= vect_used_only_live
)
5661 && single_imm_use (gimple_phi_result (stmt
), &use_p
, &use_stmt
)
5662 && (use_stmt
== reduc_stmt
5663 || (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt
))
5665 single_defuse_cycle
= true;
5667 /* Create the destination vector */
5668 scalar_dest
= gimple_assign_lhs (reduc_stmt
);
5669 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
5672 /* The size vect_schedule_slp_instance computes is off for us. */
5673 vec_num
= ((LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
5674 * SLP_TREE_SCALAR_STMTS (slp_node
).length ())
5675 / TYPE_VECTOR_SUBPARTS (vectype_in
));
5679 /* Generate the reduction PHIs upfront. */
5680 prev_phi_info
= NULL
;
5681 for (j
= 0; j
< ncopies
; j
++)
5683 if (j
== 0 || !single_defuse_cycle
)
5685 for (i
= 0; i
< vec_num
; i
++)
5687 /* Create the reduction-phi that defines the reduction
5689 new_phi
= create_phi_node (vec_dest
, loop
->header
);
5690 set_vinfo_for_stmt (new_phi
,
5691 new_stmt_vec_info (new_phi
, loop_vinfo
));
5694 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_phi
);
5698 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_phi
;
5700 STMT_VINFO_RELATED_STMT (prev_phi_info
) = new_phi
;
5701 prev_phi_info
= vinfo_for_stmt (new_phi
);
5710 /* 1. Is vectorizable reduction? */
5711 /* Not supportable if the reduction variable is used in the loop, unless
5712 it's a reduction chain. */
5713 if (STMT_VINFO_RELEVANT (stmt_info
) > vect_used_in_outer
5714 && !GROUP_FIRST_ELEMENT (stmt_info
))
5717 /* Reductions that are not used even in an enclosing outer-loop,
5718 are expected to be "live" (used out of the loop). */
5719 if (STMT_VINFO_RELEVANT (stmt_info
) == vect_unused_in_scope
5720 && !STMT_VINFO_LIVE_P (stmt_info
))
5723 /* 2. Has this been recognized as a reduction pattern?
5725 Check if STMT represents a pattern that has been recognized
5726 in earlier analysis stages. For stmts that represent a pattern,
5727 the STMT_VINFO_RELATED_STMT field records the last stmt in
5728 the original sequence that constitutes the pattern. */
5730 orig_stmt
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt
));
5733 orig_stmt_info
= vinfo_for_stmt (orig_stmt
);
5734 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info
));
5735 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info
));
  /* 3. Check the operands of the operation.  The first operands are defined
	inside the loop body.  The last operand is the reduction variable,
	which is defined by the loop-header-phi.  */

  gcc_assert (is_gimple_assign (stmt));

  /* Flatten RHS.  */
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_BINARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == binary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      break;

    case GIMPLE_TERNARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == ternary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      ops[2] = gimple_assign_rhs3 (stmt);
      break;

    case GIMPLE_UNARY_RHS:
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == COND_EXPR && slp_node)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))
    return false;

  /* Do not try to vectorize bit-precision reductions.  */
  if ((TYPE_PRECISION (scalar_type)
       != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
    return false;
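  /* Illustrative note on the bit-precision check above (not from the
     original sources): for a type such as a 3-bit bitfield member,
     TYPE_PRECISION is 3 while the precision of its mode is 8, so every
     vector operation would need extra re-normalization of the lanes;
     such reductions are therefore rejected.  */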
  /* All uses but the last are expected to be defined in the loop.
     The last use is the reduction variable.  In case of nested cycle this
     assumption is not true: we use reduc_index to record the index of the
     reduction variable.  */
  gimple *reduc_def_stmt = NULL;
  int reduc_index = -1;
  for (i = 0; i < op_type; i++)
    {
      /* The condition of COND_EXPR is checked in vectorizable_condition().  */
      if (i == 0 && code == COND_EXPR)
	continue;

      is_simple_use = vect_is_simple_use (ops[i], loop_vinfo,
					  &def_stmt, &dts[i], &tem);
      dt = dts[i];
      gcc_assert (is_simple_use);
      if (dt == vect_reduction_def)
	{
	  reduc_def_stmt = def_stmt;
	  reduc_index = i;
	  continue;
	}
      else if (tem)
	{
	  if (!vectype_in)
	    vectype_in = tem;
	}

      if (dt != vect_internal_def
	  && dt != vect_external_def
	  && dt != vect_constant_def
	  && dt != vect_induction_def
	  && !(dt == vect_nested_cycle && nested_cycle))
	return false;

      if (dt == vect_nested_cycle)
	{
	  found_nested_cycle_def = true;
	  reduc_def_stmt = def_stmt;
	  reduc_index = i;
	}

      if (i == 1 && code == COND_EXPR)
	{
	  /* Record how the value of COND_EXPR is defined.  */
	  if (dt == vect_constant_def)
	    {
	      cond_reduc_dt = dt;
	      cond_reduc_val = ops[i];
	    }
	  if (dt == vect_induction_def && def_stmt != NULL
	      && is_nonwrapping_integer_induction (def_stmt, loop))
	    cond_reduc_dt = dt;
	}
    }

  if (!vectype_in)
    vectype_in = vectype_out;
  /* When vectorizing a reduction chain w/o SLP the reduction PHI is not
     directly used in stmt.  */
  if (reduc_index == -1)
    {
      if (orig_stmt)
	reduc_def_stmt = STMT_VINFO_REDUC_DEF (orig_stmt_info);
      else
	reduc_def_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
    }

  if (! reduc_def_stmt || gimple_code (reduc_def_stmt) != GIMPLE_PHI)
    return false;

  if (!(reduc_index == -1
	|| dts[reduc_index] == vect_reduction_def
	|| dts[reduc_index] == vect_nested_cycle
	|| ((dts[reduc_index] == vect_internal_def
	     || dts[reduc_index] == vect_external_def
	     || dts[reduc_index] == vect_constant_def
	     || dts[reduc_index] == vect_induction_def)
	    && nested_cycle && found_nested_cycle_def)))
    {
      /* For pattern recognized stmts, orig_stmt might be a reduction,
	 but some helper statements for the pattern might not, or
	 might be COND_EXPRs with reduction uses in the condition.  */
      gcc_assert (orig_stmt);
      return false;
    }
  stmt_vec_info reduc_def_info = vinfo_for_stmt (reduc_def_stmt);
  enum vect_reduction_type v_reduc_type
    = STMT_VINFO_REDUC_TYPE (reduc_def_info);
  gimple *tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);

  STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
  /* If we have a condition reduction, see if we can simplify it further.  */
  if (v_reduc_type == COND_REDUCTION)
    {
      if (cond_reduc_dt == vect_induction_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "condition expression based on "
			     "integer induction.\n");
	  STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	    = INTEGER_INDUC_COND_REDUCTION;
	}

      /* Loop peeling modifies initial value of reduction PHI, which
	 makes the reduction stmt to be transformed different to the
	 original stmt analyzed.  We need to record reduction code for
	 CONST_COND_REDUCTION type reduction at analyzing stage, thus
	 it can be used directly at transform stage.  */
      if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
	  || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
	{
	  /* Also set the reduction type to CONST_COND_REDUCTION.  */
	  gcc_assert (cond_reduc_dt == vect_constant_def);
	  STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
	}
      else if (cond_reduc_dt == vect_constant_def)
	{
	  enum vect_def_type cond_initial_dt;
	  gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
	  tree cond_initial_val
	    = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));

	  gcc_assert (cond_reduc_val != NULL_TREE);
	  vect_is_simple_use (cond_initial_val, loop_vinfo,
			      &def_stmt, &cond_initial_dt);
	  if (cond_initial_dt == vect_constant_def
	      && types_compatible_p (TREE_TYPE (cond_initial_val),
				     TREE_TYPE (cond_reduc_val)))
	    {
	      tree e = fold_binary (LE_EXPR, boolean_type_node,
				    cond_initial_val, cond_reduc_val);
	      if (e && (integer_onep (e) || integer_zerop (e)))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "condition expression based on "
				     "compile time constant.\n");
		  /* Record reduction code at analysis stage.  */
		  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
		    = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
		  STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
		    = CONST_COND_REDUCTION;
		}
	    }
	}
    }

  if (orig_stmt)
    gcc_assert (tmp == orig_stmt
		|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt);
  else
    /* We changed STMT to be the first stmt in reduction chain, hence we
       check that in this case the first element in the chain is STMT.  */
    gcc_assert (stmt == tmp
		|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);

  if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
    return false;
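  /* Example of the CONST_COND_REDUCTION case classified above
     (illustrative, not from the original sources):

	  t = 5;
	  for (i = 0; i < N; i++)
	    if (a[i] < 0)
	      t = 42;

     Both the initial value (5) and the assigned value (42) are
     compile-time constants and 5 <= 42 holds, so the whole reduction can
     be carried out as a plain MAX_EXPR with no index bookkeeping.  */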
  if (slp_node)
    ncopies = 1;
  else
    ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	       / TYPE_VECTOR_SUBPARTS (vectype_in));

  gcc_assert (ncopies >= 1);

  vec_mode = TYPE_MODE (vectype_in);

  if (code == COND_EXPR)
    {
      /* Only call during the analysis stage, otherwise we'll lose
	 STMT_VINFO_TYPE.  */
      if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
						ops[reduc_index], 0, NULL))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported condition in reduction\n");
	  return false;
	}
    }
  else
    {
      /* 4. Supportable by target?  */

      if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
	  || code == LROTATE_EXPR || code == RROTATE_EXPR)
	{
	  /* Shifts and rotates are only supported by vectorizable_shifts,
	     not vectorizable_reduction.  */
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported shift or rotation.\n");
	  return false;
	}

      /* 4.1. check support for the operation in the loop  */
      optab = optab_for_tree_code (code, vectype_in, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}

      if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "op not supported by target.\n");

	  if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	      || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
		 < vect_min_worthwhile_factor (code))
	    return false;

	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "proceeding using word mode.\n");
	}

      /* Worthwhile without SIMD support?  */
      if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
	  && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	     < vect_min_worthwhile_factor (code))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not worthwhile without SIMD support.\n");
	  return false;
	}
    }
  /* 4.2. Check support for the epilog operation.

	  If STMT represents a reduction pattern, then the type of the
	  reduction variable may be different than the type of the rest
	  of the arguments.  For example, consider the case of accumulation
	  of shorts into an int accumulator; The original code:
			S1: int_a = (int) short_a;
	  orig_stmt->	S2: int_acc = plus <int_a ,int_acc>;

	  was replaced with:
			STMT: int_acc = widen_sum <short_a, int_acc>

	  This means that:
	  1. The tree-code that is used to create the vector operation in the
	     epilog code (that reduces the partial results) is not the
	     tree-code of STMT, but is rather the tree-code of the original
	     stmt from the pattern that STMT is replacing.  I.e, in the example
	     above we want to use 'widen_sum' in the loop, but 'plus' in the
	     epilog.
	  2. The type (mode) we use to check available target support
	     for the vector operation to be created in the *epilog*, is
	     determined by the type of the reduction variable (in the example
	     above we'd check this: optab_handler (plus_optab, vect_int_mode])).
	     However the type (mode) we use to check available target support
	     for the vector operation to be created *inside the loop*, is
	     determined by the type of the other arguments to STMT (in the
	     example we'd check this: optab_handler (widen_sum_optab,
	     vect_short_mode)).

	  This is contrary to "regular" reductions, in which the types of all
	  the arguments are the same as the type of the reduction variable.
	  For "regular" reductions we can therefore use the same vector type
	  (and also the same tree-code) when generating the epilog code and
	  when generating the code inside the loop.  */

  if (orig_stmt)
    {
      /* This is a reduction pattern: get the vectype from the type of the
	 reduction variable, and get the tree-code from orig_stmt.  */
      gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
		  == TREE_CODE_REDUCTION);
      orig_code = gimple_assign_rhs_code (orig_stmt);
      gcc_assert (vectype_out);
      vec_mode = TYPE_MODE (vectype_out);
    }
  else
    {
      /* Regular reduction: the same vectype and tree-code as used for
	 the vector code inside the loop can be used for the epilog
	 code.  */
      orig_code = code;

      if (code == MINUS_EXPR)
	orig_code = PLUS_EXPR;

      /* For simple condition reductions, replace with the actual expression
	 we want to base our reduction around.  */
      if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION)
	{
	  orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
	  gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
	}
      else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
	       == INTEGER_INDUC_COND_REDUCTION)
	orig_code = MAX_EXPR;
    }
  if (nested_cycle)
    {
      def_bb = gimple_bb (reduc_def_stmt);
      def_stmt_loop = def_bb->loop_father;
      def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
				       loop_preheader_edge (def_stmt_loop));
      if (TREE_CODE (def_arg) == SSA_NAME
	  && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
	  && gimple_code (def_arg_stmt) == GIMPLE_PHI
	  && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
	  && vinfo_for_stmt (def_arg_stmt)
	  && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
	     == vect_double_reduction_def)
	double_reduc = true;
    }

  epilog_reduc_code = ERROR_MARK;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != COND_REDUCTION)
    {
      if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
	{
	  reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
					     optab_default);
	  if (!reduc_optab)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "no optab for reduction.\n");

	      epilog_reduc_code = ERROR_MARK;
	    }
	  else if (optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "reduc op not supported by target.\n");

	      epilog_reduc_code = ERROR_MARK;
	    }
	}
      else
	{
	  if (!nested_cycle || double_reduc)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "no reduc code for scalar code.\n");
	      return false;
	    }
	}
    }
  else
    {
      int scalar_precision
	= GET_MODE_PRECISION (TYPE_MODE (scalar_type));
      cr_index_scalar_type = make_unsigned_type (scalar_precision);
      cr_index_vector_type = build_vector_type
	(cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype_out));

      optab = optab_for_tree_code (REDUC_MAX_EXPR, cr_index_vector_type,
				   optab_default);
      if (optab_handler (optab, TYPE_MODE (cr_index_vector_type))
	  != CODE_FOR_nothing)
	epilog_reduc_code = REDUC_MAX_EXPR;
    }

  if ((double_reduc
       || STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) != TREE_CODE_REDUCTION)
      && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in double reduction or condition "
			 "reduction.\n");
      return false;
    }
  /* In case of widening multiplication by a constant, we update the type
     of the constant to be the type of the other operand.  We check that the
     constant fits the type in the pattern recognition pass.  */
  if (code == DOT_PROD_EXPR
      && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
    {
      if (TREE_CODE (ops[0]) == INTEGER_CST)
	ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
      else if (TREE_CODE (ops[1]) == INTEGER_CST)
	ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "invalid types in dot-prod\n");
	  return false;
	}
    }
  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
    {
      widest_int ni;

      if (! max_loop_iterations (loop, &ni))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "loop count not known, cannot create cond "
			     "reduction.\n");
	  return false;
	}
      /* Convert backedges to iterations.  */
      ni += 1;

      /* The additional index will be the same type as the condition.  Check
	 that the loop can fit into this less one (because we'll use up the
	 zero slot for when there are no matches).  */
      tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
      if (wi::geu_p (ni, wi::to_widest (max_index)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "loop size is greater than data size.\n");
	  return false;
	}
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies);
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
  /* FORNOW: Multiple types are not supported for condition.  */
  if (code == COND_EXPR)
    gcc_assert (ncopies == 1);

  /* Create the destination vector  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
	r0 = phi (init, r0)
	r1 = phi (init, r1)
	r0 = x0 + r0;
	r1 = x1 + r1;
    (i.e. we generate VF results in 2 registers).
    In this case we have a separate def-use cycle for each copy, and therefore
    for each copy we get the vector def for the reduction variable from the
    respective phi node created for this copy.

    Otherwise (the reduction is unused in the loop nest), we can combine
    together intermediate results, like so (e.g. for ncopies=2):
	r = phi (init, r)
	r = x0 + r;
	r = x1 + r;
   (i.e. we generate VF/2 results in a single register).
   In this case for each copy we get the vector def for the reduction variable
   from the vectorized reduction operation generated in the previous iteration.

   This only works when we see both the reduction PHI and its only consumer
   in vectorizable_reduction and there are no intermediate stmts
   participating.  */
  use_operand_p use_p;
  gimple *use_stmt;
  if (ncopies > 1
      && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
      && single_imm_use (gimple_phi_result (reduc_def_stmt), &use_p, &use_stmt)
      && (use_stmt == stmt
	  || STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == stmt))
    {
      single_defuse_cycle = true;
      epilog_copies = 1;
    }
  else
    epilog_copies = ncopies;

  prev_stmt_info = NULL;
  prev_phi_info = NULL;
  if (slp_node)
    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
  else
    {
      vec_num = 1;
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      if (op_type == ternary_op)
	vec_oprnds2.create (1);
    }

  phis.create (vec_num);
  vect_defs.create (vec_num);
  if (!slp_node)
    vect_defs.quick_push (NULL_TREE);

  auto_vec<tree> vec_oprnds;
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
	{
	  for (i = 0; i < vec_num; i++)
	    {
	      /* Get the created reduction-phi that defines the reduction
		 operand.  */
	      tree reduc_def = gimple_phi_result (reduc_def_stmt);
	      if (j == 0)
		vect_get_vec_defs (reduc_def, NULL, stmt, &vec_oprnds, NULL,
				   slp_node);
	      else
		{
		  dt = vect_reduction_def;
		  vect_get_vec_defs_for_stmt_copy (&dt,
						   &vec_oprnds, NULL);
		}
	      new_phi = as_a <gphi *> (SSA_NAME_DEF_STMT (vec_oprnds[i]));
	      if (j == 0 || slp_node)
		phis.quick_push (new_phi);
	    }
	}

      if (code == COND_EXPR)
	{
	  gcc_assert (!slp_node);
	  vectorizable_condition (stmt, gsi, vec_stmt,
				  PHI_RESULT (phis[0]),
				  reduc_index, NULL);
	  /* Multiple types are not supported for condition.  */
	  break;
	}

      /* Handle uses.  */
      if (j == 0)
	{
	  if (slp_node)
	    {
	      /* Get vec defs for all the operands except the reduction index,
		 ensuring the ordering of the ops in the vector is kept.  */
	      auto_vec<tree, 3> slp_ops;
	      auto_vec<vec<tree>, 3> vec_defs;

	      slp_ops.quick_push (ops[0]);
	      slp_ops.quick_push (ops[1]);
	      if (op_type == ternary_op)
		slp_ops.quick_push (ops[2]);

	      vect_get_slp_defs (slp_ops, slp_node, &vec_defs);

	      vec_oprnds0.safe_splice (vec_defs[0]);
	      vec_defs[0].release ();
	      vec_oprnds1.safe_splice (vec_defs[1]);
	      vec_defs[1].release ();
	      if (op_type == ternary_op)
		{
		  vec_oprnds2.safe_splice (vec_defs[2]);
		  vec_defs[2].release ();
		}
	    }
	  else
	    {
	      vec_oprnds0.quick_push
		(vect_get_vec_def_for_operand (ops[0], stmt));
	      vec_oprnds1.quick_push
		(vect_get_vec_def_for_operand (ops[1], stmt));
	      if (op_type == ternary_op)
		vec_oprnds2.quick_push
		  (vect_get_vec_def_for_operand (ops[2], stmt));
	    }
	}
      else
	{
	  if (!slp_node)
	    {
	      gcc_assert (reduc_index != -1 || ! single_defuse_cycle);

	      if (single_defuse_cycle && reduc_index == 0)
		vec_oprnds0[0] = gimple_assign_lhs (new_stmt);
	      else
		vec_oprnds0[0]
		  = vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]);
	      if (single_defuse_cycle && reduc_index == 1)
		vec_oprnds1[0] = gimple_assign_lhs (new_stmt);
	      else
		vec_oprnds1[0]
		  = vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]);
	      if (op_type == ternary_op)
		{
		  if (single_defuse_cycle && reduc_index == 2)
		    vec_oprnds2[0] = gimple_assign_lhs (new_stmt);
		  else
		    vec_oprnds2[0]
		      = vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]);
		}
	    }
	}

      FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
	{
	  tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
	  if (op_type == ternary_op)
	    vop[2] = vec_oprnds2[i];

	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  new_stmt = gimple_build_assign (new_temp, code,
					  vop[0], vop[1], vop[2]);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (slp_node)
	    {
	      SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      vect_defs.quick_push (new_temp);
	    }
	  else
	    vect_defs[0] = new_temp;
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
  if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
    vect_defs[0] = gimple_assign_lhs (*vec_stmt);

  vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt,
				    epilog_copies,
				    epilog_reduc_code, phis, reduc_index,
				    double_reduc, slp_node);

  return true;
}
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */
int
vect_min_worthwhile_factor (enum tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
      return 2;

    default:
      return INT_MAX;
    }
}
/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_induction (gimple *phi,
			gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			gimple **vec_stmt, slp_tree slp_node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  unsigned ncopies;
  bool nested_in_vect_loop = false;
  struct loop *iv_loop;
  tree vec_def;
  edge pe = loop_preheader_edge (loop);
  basic_block new_bb;
  tree new_vec, vec_init, vec_step, t;
  tree new_name;
  gimple *new_stmt;
  gphi *induction_phi;
  tree induc_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned i;
  tree expr;
  gimple_seq stmts;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple *exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (phi);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* Make sure it was recognized as induction computation.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vf / nunits;
  gcc_assert (ncopies >= 1);
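  /* E.g. (illustrative): with a vectorization factor of 8 and a vectype
     of 4 units, ncopies is 2, i.e. the induction is realized by two
     vector stmts per scalar PHI.  */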
  /* FORNOW. These restrictions should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi))
    {
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple *exit_phi;
      edge latch_e;
      tree loop_arg;

      if (ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types in nested loop.\n");
	  return false;
	}

      /* FORNOW: outer loop induction with SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
	return false;

      exit_phi = NULL;
      latch_e = loop_latch_edge (loop->inner);
      loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
	  if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
		&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "inner-loop induction only used outside "
				 "of the outer vectorized loop.\n");
	      return false;
	    }
	}

      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_induction ===\n");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /* Transform.  */
  /* Compute a vector variable, initialized with the first VF values of
     the induction variable.  E.g., for an iv with IV_PHI='X' and
     evolution S, for a vector of 4 units, we want to compute:
     [X, X + S, X + 2*S, X + 3*S].  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);

  step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
  gcc_assert (step_expr != NULL_TREE);

  pe = loop_preheader_edge (iv_loop);
  init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
				     loop_preheader_edge (iv_loop));

  /* Convert the step to the desired type.  */
  stmts = NULL;
  step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
  if (stmts)
    {
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);
  /* For SLP induction we have to generate several IVs as for example
     with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
     [i + 2*S, i + 3*S, i + 3*S, i + 3*S].  The step is the same uniform
     [VF*S, VF*S, VF*S, VF*S] for all.  */
  if (slp_node)
    {
      /* Convert the init to the desired type.  */
      stmts = NULL;
      init_expr = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
      if (stmts)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}

      /* Generate [VF*S, VF*S, ... ].  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, vf);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (! CONSTANT_CLASS_P (new_name))
	new_name = vect_init_vector (phi, new_name,
				     TREE_TYPE (step_expr), NULL);
      new_vec = build_vector_from_val (vectype, new_name);
      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);

      /* Now generate the IVs.  */
      unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      unsigned elts = nunits * nvects;
      unsigned nivs = least_common_multiple (group_size, nunits) / nunits;
      gcc_assert (elts % group_size == 0);
      tree elt = init_expr;
      unsigned ivn;
      for (ivn = 0; ivn < nivs; ++ivn)
	{
	  tree *elts = XALLOCAVEC (tree, nunits);
	  bool constant_p = true;
	  for (unsigned eltn = 0; eltn < nunits; ++eltn)
	    {
	      if (ivn*nunits + eltn >= group_size
		  && (ivn*nunits + eltn) % group_size == 0)
		{
		  stmts = NULL;
		  elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
				      elt, step_expr);
		  if (stmts)
		    {
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		}
	      if (! CONSTANT_CLASS_P (elt))
		constant_p = false;
	      elts[eltn] = elt;
	    }
	  if (constant_p)
	    new_vec = build_vector (vectype, elts);
	  else
	    {
	      vec<constructor_elt, va_gc> *v;
	      vec_alloc (v, nunits);
	      for (i = 0; i < nunits; ++i)
		CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
	      new_vec = build_constructor (vectype, v);
	    }
	  vec_init = vect_init_vector (phi, new_vec, vectype, NULL);

	  /* Create the induction-phi that defines the induction-operand.  */
	  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
	  induction_phi = create_phi_node (vec_dest, iv_loop->header);
	  set_vinfo_for_stmt (induction_phi,
			      new_stmt_vec_info (induction_phi, loop_vinfo));
	  induc_def = PHI_RESULT (induction_phi);

	  /* Create the iv update inside the loop  */
	  vec_def = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));

	  /* Set the arguments of the phi node:  */
	  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
	  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
		       UNKNOWN_LOCATION);

	  SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);
	}

      /* Re-use IVs when we can.  */
      if (ivn < nvects)
	{
	  unsigned vfp
	    = least_common_multiple (group_size, nunits) / group_size;
	  /* Generate [VF'*S, VF'*S, ... ].  */
	  if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	    {
	      expr = build_int_cst (integer_type_node, vfp);
	      expr = fold_convert (TREE_TYPE (step_expr), expr);
	    }
	  else
	    expr = build_int_cst (TREE_TYPE (step_expr), vfp);
	  new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
				  expr, step_expr);
	  if (! CONSTANT_CLASS_P (new_name))
	    new_name = vect_init_vector (phi, new_name,
					 TREE_TYPE (step_expr), NULL);
	  new_vec = build_vector_from_val (vectype, new_name);
	  vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
	  for (; ivn < nvects; ++ivn)
	    {
	      gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
	      tree def;
	      if (gimple_code (iv) == GIMPLE_PHI)
		def = gimple_phi_result (iv);
	      else
		def = gimple_assign_lhs (iv);
	      new_stmt = gimple_build_assign (make_ssa_name (vectype),
					      PLUS_EXPR,
					      def, vec_step);
	      if (gimple_code (iv) == GIMPLE_PHI)
		gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	      else
		{
		  gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
		  gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
		}
	      set_vinfo_for_stmt (new_stmt,
				  new_stmt_vec_info (new_stmt, loop_vinfo));
	      SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	}

      return true;
    }
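  /* Worked example for the IV re-use above (illustrative, not from the
     original sources): with group_size 3 and nunits 4,
     least_common_multiple (3, 4) = 12, so nivs = 12 / 4 = 3 distinct
     initial vectors cover all lane patterns.  Any further copy
     (ivn >= nivs) is computed from copy ivn - nivs by adding the uniform
     step vector built from vfp = 12 / 3 = 4 steps per lane.  */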
  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
	 been created during vectorization of previous stmts.  We obtain it
	 from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      vec_init = vect_get_vec_def_for_operand (init_expr, phi);
      /* If the initial value is not of proper type, convert it.  */
      if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
	{
	  new_stmt
	    = gimple_build_assign (vect_get_new_ssa_name (vectype,
							  vect_simple_var,
							  "vec_iv_"),
				   VIEW_CONVERT_EXPR,
				   build1 (VIEW_CONVERT_EXPR, vectype,
					   vec_init));
	  vec_init = gimple_assign_lhs (new_stmt);
	  new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
						 new_stmt);
	  gcc_assert (!new_bb);
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	}
    }
  else
    {
      vec<constructor_elt, va_gc> *v;

      /* iv_loop is the loop to be vectorized.  Create:
	 vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      stmts = NULL;
      new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);

      vec_alloc (v, nunits);
      bool constant_p = is_gimple_min_invariant (new_name);
      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
      for (i = 1; i < nunits; i++)
	{
	  /* Create: new_name_i = new_name + step_expr  */
	  new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
				   new_name, step_expr);
	  if (!is_gimple_min_invariant (new_name))
	    constant_p = false;
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
	}
      if (stmts)
	{
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
	  gcc_assert (!new_bb);
	}

      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
      if (constant_p)
	new_vec = build_vector_from_ctor (vectype, v);
      else
	new_vec = build_constructor (vectype, v);
      vec_init = vect_init_vector (phi, new_vec, vectype, NULL);
    }
  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized.  Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized.  Generate:
	 vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, vf);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
	new_name = vect_init_vector (phi, new_name,
				     TREE_TYPE (step_expr), NULL);
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
	      || TREE_CODE (new_name) == SSA_NAME);
  new_vec = build_vector_from_val (vectype, t);
  vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
  /* Create the following def-use cycle:
     loop prolog:
	 vec_init = ...
	 vec_step = ...
     loop:
	 vec_iv = PHI <vec_init, vec_loop>
	 ...
	 STMT
	 ...
	 vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
		      new_stmt_vec_info (induction_phi, loop_vinfo));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  vec_def = make_ssa_name (vec_dest);
  new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
	       UNKNOWN_LOCATION);

  STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi;
  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
	{
	  expr = build_int_cst (integer_type_node, nunits);
	  expr = fold_convert (TREE_TYPE (step_expr), expr);
	}
      else
	expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			      expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
	new_name = vect_init_vector (phi, new_name,
				     TREE_TYPE (step_expr), NULL);
      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
		  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (vectype, t);
      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
	{
	  /* vec_i = vec_prev + vec_step  */
	  new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
					  vec_def, vec_step);
	  vec_def = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, vec_def);

	  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
	  set_vinfo_for_stmt (new_stmt,
			      new_stmt_vec_info (new_stmt, loop_vinfo));
	  STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
	  prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
	}
    }
  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
	 the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
	    {
	      exit_phi = use_stmt;
	      break;
	    }
	}
      if (exit_phi)
	{
	  stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
	  /* FORNOW. Currently not supporting the case that an inner-loop induction
	     is not used in the outer-loop (i.e. only outside the outer-loop).  */
	  gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
		      && !STMT_VINFO_LIVE_P (stmt_vinfo));

	  STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "vector of inductions after inner-loop:");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
	    }
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
			SSA_NAME_DEF_STMT (vec_def), 0);
    }

  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple *stmt,
			     gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
			     slp_tree slp_node, int slp_index,
			     gimple **vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;
  tree lhs, lhs_type, bitsize, vec_bitsize;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gimple *use_stmt;
  auto_vec<tree> vec_oprnds;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  /* FORNOW.  CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  /* If STMT is not relevant and it is a simple assignment and its inputs are
     invariant then it can remain in place, unvectorized.  The original last
     scalar value that it computes will be used.  */
  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "statement is simple and uses invariant.  Leaving in "
			 "place.\n");
      return true;
    }

  if (!vec_stmt)
    /* No transformation required.  */
    return true;

  /* If stmt has a related stmt, then use that for getting the lhs.  */
  if (is_pattern_stmt_p (stmt_info))
    stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
	: gimple_get_lhs (stmt);
  lhs_type = TREE_TYPE (lhs);

  bitsize = TYPE_SIZE (TREE_TYPE (vectype));
  vec_bitsize = TYPE_SIZE (vectype);

  /* Get the vectorized lhs of STMT and the lane to use (counted in bits).  */
  tree vec_lhs, bitstart;
  if (slp_node)
    {
      gcc_assert (slp_index >= 0);

      int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
      int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

      /* Get the last occurrence of the scalar index from the concatenation of
	 all the slp vectors.  Calculate which slp vector it is and the index
	 within.  */
      int pos = (num_vec * nunits) - num_scalar + slp_index;
      int vec_entry = pos / nunits;
      int vec_index = pos % nunits;

      /* Get the correct slp vectorized stmt.  */
      vec_lhs = gimple_get_lhs (SLP_TREE_VEC_STMTS (slp_node)[vec_entry]);

      /* Get entry to use.  */
      bitstart = build_int_cst (unsigned_type_node, vec_index);
      bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
    }
  else
    {
      enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
      vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt);

      /* For multiple copies, get the last copy.  */
      for (int i = 1; i < ncopies; ++i)
	vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
						  vec_lhs);

      /* Get the last lane in the vector.  */
      bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
    }

  /* Create a new vectorized stmt for the uses of STMT and insert outside the
     loop.  */
  gimple_seq stmts = NULL;
  tree bftype = TREE_TYPE (vectype);
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
  tree new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
  new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree), &stmts,
				   true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);

  /* Replace use of lhs with newly computed result.  If the use stmt is a
     single arg PHI, just replace all uses of PHI result.  It's necessary
     because lcssa PHI defining lhs may be before newly inserted stmt.  */
  use_operand_p use_p;
  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
    if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
	&& !is_gimple_debug (use_stmt))
      {
	if (gimple_code (use_stmt) == GIMPLE_PHI
	    && gimple_phi_num_args (use_stmt) == 1)
	  replace_uses_by (gimple_phi_result (use_stmt), new_tree);
	else
	  {
	    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	      SET_USE (use_p, new_tree);
	  }
	update_stmt (use_stmt);
      }

  return true;
}
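/* Worked example for the SLP lane selection in vectorizable_live_operation
   above (illustrative, not from the original sources): with num_scalar = 3,
   num_vec = 2 and nunits = 4, the scalar results occupy the final 3 of the
   8 concatenated lanes, so slp_index 0 yields pos = 8 - 3 + 0 = 5, i.e.
   vec_entry 1 and vec_index 1.  */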
/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple *ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb;

	  if (!is_gimple_debug (ustmt))
	    continue;

	  bb = gimple_bb (ustmt);

	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (gimple_debug_bind_p (ustmt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "killing debug use\n");

		  gimple_debug_bind_reset_value (ustmt);
		  update_stmt (ustmt);
		}
	      else
		gcc_unreachable ();
	    }
	}
    }
}
/* Given loop represented by LOOP_VINFO, return true if computation of
   LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
   otherwise.  */

static bool
loop_niters_no_overflow (loop_vec_info loop_vinfo)
{
  /* Constant case.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
      tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);

      gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
      gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
      if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
	return true;
    }

  widest_int max;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  /* Check the upper bound of loop niters.  */
  if (get_max_loop_iterations (loop, &max))
    {
      tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
      signop sgn = TYPE_SIGN (type);
      widest_int type_max = widest_int::from (wi::max_value (type), sgn);
      if (max < type_max)
	return true;
    }
  return false;
}
/* Scale profiling counters by estimation for LOOP which is vectorized
   by factor VF.  */

static void
scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
{
  edge preheader = loop_preheader_edge (loop);
  /* Reduce loop iterations by the vectorization factor.  */
  gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
  profile_count freq_h = loop->header->count, freq_e = preheader->count;

  /* Use frequency only if counts are zero.  */
  if (!(freq_h > 0) && !(freq_e > 0))
    {
      freq_h = profile_count::from_gcov_type (loop->header->frequency);
      freq_e = profile_count::from_gcov_type (EDGE_FREQUENCY (preheader));
    }
  if (freq_h > 0)
    {
      profile_probability p;

      /* Avoid dropping loop body profile counter to 0 because of zero count
	 in loop's preheader.  */
      if (!(freq_e > profile_count::from_gcov_type (1)))
	freq_e = profile_count::from_gcov_type (1);
      p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
      scale_loop_frequencies (loop, p);
    }

  basic_block exit_bb = single_pred (loop->latch);
  edge exit_e = single_exit (loop);
  exit_e->count = loop_preheader_edge (loop)->count;
  exit_e->probability = profile_probability::always ()
			.apply_scale (1, new_est_niter + 1);

  edge exit_l = single_pred_edge (loop->latch);
  int prob = exit_l->probability.initialized_p ()
	     ? exit_l->probability.to_reg_br_prob_base () : 0;
  exit_l->probability = exit_e->probability.invert ();
  exit_l->count = exit_bb->count - exit_e->count;
  if (prob > 0)
    scale_bbs_frequencies_int (&loop->latch, 1,
			       exit_l->probability.to_reg_br_prob_base (),
			       prob);
}
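/* Numeric sketch of the scaling above (illustrative, not from the original
   sources): for a loop whose preheader count is 10 and whose header count
   is 1000 (about 100 iterations per entry), vectorizing by vf = 4 gives a
   new_est_niter of about 25, and the header count is rescaled towards
   10 * (25 + 1).  */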
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - created vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.
   Returns scalar epilogue loop if any.  */

struct loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  struct loop *epilogue = NULL;
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  int i;
  tree niters_vector = NULL;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool grouped_store;
  bool slp_scheduled = false;
  gimple *stmt, *pattern_stmt;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool transform_pattern_stmt = false;
  bool check_profitability = false;
  int th;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the vectorization factor number of times checking
     is pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo)
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Profitability threshold is %d loop iterations.\n",
			 th);
      check_profitability = true;
    }

  /* Make sure there exists a single-predecessor exit bb.  Do this before
     versioning.  */
  edge e = single_exit (loop);
  if (! single_pred_p (e->dest))
    {
      split_loop_exit_edge (e);
      if (dump_enabled_p ())
	dump_printf (MSG_NOTE, "split exit edge\n");
    }

  /* Version the loop first, if required, so the profitability check
     comes first.  */

  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    {
      vect_loop_versioning (loop_vinfo, th, check_profitability);
      check_profitability = false;
    }

  /* Make sure there exists a single-predecessor exit bb also on the
     scalar loop copy.  Do this after versioning but before peeling
     so CFG structure is fine for both scalar and if-converted loop
     to make slpeel_duplicate_current_defs_from_edges face matched
     loop closed PHI nodes on the exit.  */
  if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
    {
      e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
      if (! single_pred_p (e->dest))
	{
	  split_loop_exit_edge (e);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
	}
    }

  tree niters = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
  tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
  bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
  epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector, th,
			      check_profitability, niters_no_overflow);
  if (niters_vector == NULL_TREE)
    {
      if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
	niters_vector
	  = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
			   LOOP_VINFO_INT_NITERS (loop_vinfo) / vf);
      else
	vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
				     niters_no_overflow);
    }
  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops which body consist
     of one basic block (header + empty latch).  When the vectorizer will
     support more involved loop forms, the order by which the BBs are
     traversed need to be reconsidered.  */

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;

      for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
	   gsi_next (&si))
	{
	  gphi *phi = si.phi ();
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "------>vectorizing phi: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }
	  stmt_info = vinfo_for_stmt (phi);
	  if (!stmt_info)
	    continue;

	  if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, phi);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    continue;

	  if (STMT_VINFO_VECTYPE (stmt_info)
	      && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
		  != (unsigned HOST_WIDE_INT) vf)
	      && dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

	  if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
	       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
	      && ! PURE_SLP_STMT (stmt_info))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
	      vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
	    }
	}

      pattern_stmt = NULL;
      for (gimple_stmt_iterator si = gsi_start_bb (bb);
	   !gsi_end_p (si) || transform_pattern_stmt;)
	{
	  bool is_store;

	  if (transform_pattern_stmt)
	    stmt = pattern_stmt;
	  else
	    {
	      stmt = gsi_stmt (si);
	      /* During vectorization remove existing clobber stmts.  */
	      if (gimple_clobber_p (stmt))
		{
		  unlink_stmt_vdef (stmt);
		  gsi_remove (&si, true);
		  release_defs (stmt);
		  continue;
		}
	    }

	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "------>vectorizing statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  stmt_info = vinfo_for_stmt (stmt);

	  /* vector stmts created in the outer-loop during vectorization of
	     stmts in an inner-loop may not have a stmt_info, and do not
	     need to be vectorized.  */
	  if (!stmt_info)
	    {
	      gsi_next (&si);
	      continue;
	    }

	  if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
	    vect_loop_kill_debug_uses (loop, stmt);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_LIVE_P (stmt_info))
	    {
	      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
		{
		  stmt = pattern_stmt;
		  stmt_info = vinfo_for_stmt (stmt);
		}
	      else
		{
		  gsi_next (&si);
		  continue;
		}
	    }
	  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
		   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
		   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
		       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	    transform_pattern_stmt = true;

	  /* If pattern statement has def stmts, vectorize them too.  */
	  if (is_pattern_stmt_p (stmt_info))
	    {
	      if (pattern_def_seq == NULL)
		{
		  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
		  pattern_def_si = gsi_start (pattern_def_seq);
		}
	      else if (!gsi_end_p (pattern_def_si))
		gsi_next (&pattern_def_si);
	      if (pattern_def_seq != NULL)
		{
		  gimple *pattern_def_stmt = NULL;
		  stmt_vec_info pattern_def_stmt_info = NULL;

		  while (!gsi_end_p (pattern_def_si))
		    {
		      pattern_def_stmt = gsi_stmt (pattern_def_si);
		      pattern_def_stmt_info
			= vinfo_for_stmt (pattern_def_stmt);
		      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
			  || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
			break;
		      gsi_next (&pattern_def_si);
		    }

		  if (!gsi_end_p (pattern_def_si))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "==> vectorizing pattern def "
					   "stmt: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
					    pattern_def_stmt, 0);
			}

		      stmt = pattern_def_stmt;
		      stmt_info = pattern_def_stmt_info;
		    }
		  else
		    {
		      pattern_def_si = gsi_none ();
		      transform_pattern_stmt = false;
		    }
		}
	      else
		transform_pattern_stmt = false;
	    }

	  if (STMT_VINFO_VECTYPE (stmt_info))
	    {
	      unsigned int nunits
		= (unsigned int)
		  TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
	      if (!STMT_SLP_TYPE (stmt_info)
		  && nunits != (unsigned int) vf
		  && dump_enabled_p ())
		/* For SLP VF is set according to unrolling factor, and not
		   to vector size, hence for SLP this print is not valid.  */
		dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
	    }

	  /* SLP.  Schedule all the SLP instances when the first SLP stmt is
	     reached.  */
	  if (STMT_SLP_TYPE (stmt_info))
	    {
	      if (!slp_scheduled)
		{
		  slp_scheduled = true;

		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "=== scheduling SLP instances ===\n");

		  vect_schedule_slp (loop_vinfo);
		}

	      /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
	      if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
		{
		  if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
		    {
		      pattern_def_seq = NULL;
		      gsi_next (&si);
		    }
		  continue;
		}
	    }

	  /* -------- vectorize statement ------------ */
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

	  grouped_store = false;
	  is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
	  if (is_store)
	    {
	      if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
		{
		  /* Interleaving.  If IS_STORE is TRUE, the vectorization of
		     the interleaving chain was completed - free all the
		     stores in the chain.  */
		  gsi_next (&si);
		  vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
		}
	      else
		{
		  /* Free the attached stmt_vec_info and remove the stmt.  */
		  gimple *store = gsi_stmt (si);
		  free_stmt_vec_info (store);
		  unlink_stmt_vdef (store);
		  gsi_remove (&si, true);
		  release_defs (store);
		}

	      /* Stores can only appear at the end of pattern statements.  */
	      gcc_assert (!transform_pattern_stmt);
	      pattern_def_seq = NULL;
	    }
	  else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
	    {
	      pattern_def_seq = NULL;
	      gsi_next (&si);
	    }
	}			/* stmts in BB */
    }				/* BBs in loop */
  slpeel_make_loop_iterate_ntimes (loop, niters_vector);

  scale_profile_for_vect_loop (loop, vf);

  /* The minimum number of iterations performed by the epilogue.  This
     is 1 when peeling for gaps because we always need a final scalar
     iteration.  */
  int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
  /* +1 to convert latch counts to loop iteration counts,
     -min_epilogue_iters to remove iterations that cannot be performed
     by the vector code.  */
  int bias = 1 - min_epilogue_iters;
  /* In these calculations the "- 1" converts loop iteration counts
     back to latch counts.  */
  if (loop->any_upper_bound)
    loop->nb_iterations_upper_bound
      = wi::udiv_floor (loop->nb_iterations_upper_bound + bias, vf) - 1;
  if (loop->any_likely_upper_bound)
    loop->nb_iterations_likely_upper_bound
      = wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias, vf) - 1;
  if (loop->any_estimate)
    loop->nb_iterations_estimate
      = wi::udiv_floor (loop->nb_iterations_estimate + bias, vf) - 1;
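  /* E.g. (illustrative): with vf = 4 and no peeling for gaps (bias = 1),
     a recorded latch-count upper bound of 102 (103 iterations) becomes
     (102 + 1) / 4 - 1 = 24, matching the 25 iterations of the vector
     loop.  */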
  if (dump_enabled_p ())
    {
      if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "LOOP VECTORIZED\n");
	  if (loop->inner)
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "OUTER LOOP VECTORIZED\n");
	  dump_printf (MSG_NOTE, "\n");
	}
      else
	dump_printf_loc (MSG_NOTE, vect_location,
			 "LOOP EPILOGUE VECTORIZED (VS=%d)\n",
			 current_vector_size);
    }

  /* Free SLP instances here because otherwise stmt reference counting
     won't work.  */
  slp_instance instance;
  FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    vect_free_slp_instance (instance);
  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  /* Clear-up safelen field since its value is invalid after vectorization
     since vectorized loop can have loop-carried dependencies.  */
  loop->safelen = 0;

  /* Don't vectorize epilogue for epilogue.  */
  if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    epilogue = NULL;

  if (epilogue)
    {
      unsigned int vector_sizes
	= targetm.vectorize.autovectorize_vector_sizes ();
      vector_sizes &= current_vector_size - 1;

      if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
	epilogue = NULL;
      else if (!vector_sizes)
	epilogue = NULL;
      else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
	       && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
	{
	  int smallest_vec_size = 1 << ctz_hwi (vector_sizes);
	  int ratio = current_vector_size / smallest_vec_size;
	  int eiters = LOOP_VINFO_INT_NITERS (loop_vinfo)
	    - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
	  eiters = eiters % vf;

	  epilogue->nb_iterations_upper_bound = eiters - 1;

	  if (eiters < vf / ratio)
	    epilogue = NULL;
	}
    }

  if (epilogue)
    {
      epilogue->force_vectorize = loop->force_vectorize;
      epilogue->safelen = loop->safelen;
      epilogue->dont_vectorize = false;

      /* We may need to if-convert epilogue to vectorize it.  */
      if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
	tree_if_conversion (epilogue);
    }

  return epilogue;
}
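/* Example for the epilogue-vectorization decision above (illustrative, not
   from the original sources): with current_vector_size 32 and target
   vector sizes {32, 16, 8}, vector_sizes retains {16, 8}; smallest_vec_size
   is 8 and ratio is 32 / 8 = 4, so a known epilogue iteration count below
   vf / 4 makes another round of vectorization not worthwhile and the
   epilogue is dropped.  */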
7674 /* The code below is trying to perform simple optimization - revert
7675 if-conversion for masked stores, i.e. if the mask of a store is zero
7676 do not perform it and all stored value producers also if possible.
7684 this transformation will produce the following semi-hammock:
7686 if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
7688 vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
7689 vect__12.22_172 = vect__11.19_170 + vect_cst__171;
7690 MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
7691 vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
7692 vect__19.28_184 = vect__18.25_182 + vect_cst__183;
7693 MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
7698 optimize_mask_stores (struct loop
*loop
)
7700 basic_block
*bbs
= get_loop_body (loop
);
7701 unsigned nbbs
= loop
->num_nodes
;
7704 struct loop
*bb_loop
;
7705 gimple_stmt_iterator gsi
;
7707 auto_vec
<gimple
*> worklist
;
7709 vect_location
= find_loop_location (loop
);
7710 /* Pick up all masked stores in loop if any. */
7711 for (i
= 0; i
< nbbs
; i
++)
7714 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
);
7717 stmt
= gsi_stmt (gsi
);
7718 if (gimple_call_internal_p (stmt
, IFN_MASK_STORE
))
7719 worklist
.safe_push (stmt
);
7724 if (worklist
.is_empty ())
7727 /* Loop has masked stores. */
7728 while (!worklist
.is_empty ())
7730 gimple
*last
, *last_store
;
7733 basic_block store_bb
, join_bb
;
7734 gimple_stmt_iterator gsi_to
;
7735 tree vdef
, new_vdef
;
7740 last
= worklist
.pop ();
7741 mask
= gimple_call_arg (last
, 2);
7742 bb
= gimple_bb (last
);
      /* Create then_bb and the if-then structure in the CFG; then_bb
         belongs to the same loop as if_bb.  It can be different from
         LOOP when a two-level loop-nest is vectorized and the
         mask_store belongs to the inner one.  */
      e = split_block (bb, last);
      bb_loop = bb->loop_father;
      gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));

      join_bb = e->dest;
      store_bb = create_empty_bb (bb);
      add_bb_to_loop (store_bb, bb_loop);
      e->flags = EDGE_TRUE_VALUE;
      efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
      /* Put STORE_BB to likely part.  */
      efalse->probability = profile_probability::unlikely ();
      store_bb->frequency = PROB_ALWAYS - EDGE_FREQUENCY (efalse);
      make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
      if (dom_info_available_p (CDI_DOMINATORS))
        set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
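      /* At this point the CFG forms the half-diamond (a schematic
         sketch, not dump output):

             bb --EDGE_TRUE_VALUE--> join_bb
              \                        ^
               EDGE_FALSE_VALUE        | EDGE_FALLTHRU
                \                      |
                 +-----> store_bb -----+

         The comparison that selects between the two paths is inserted
         at the end of BB just below.  */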
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Create new block %d to sink mask stores.",
                         store_bb->index);
      /* Create vector comparison with boolean result.  */
      vectype = TREE_TYPE (mask);
      zero = build_zero_cst (vectype);
      stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
      gsi = gsi_last_bb (bb);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
      /* Create new PHI node for vdef of the last masked store:
         .MEM_2 = VDEF <.MEM_1>
         will be converted to
         .MEM_3 = VDEF <.MEM_1>
         and new PHI node will be created in join bb
         .MEM_2 = PHI <.MEM_1, .MEM_3>
      */
      vdef = gimple_vdef (last);
      new_vdef = make_ssa_name (gimple_vop (cfun), last);
      gimple_set_vdef (last, new_vdef);
      phi = create_phi_node (vdef, join_bb);
      add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
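      /* With this rewiring the original virtual definition .MEM_2 is
         now produced by the PHI in JOIN_BB, so statements downstream of
         the join need no updating; the sunk store defines the fresh
         name .MEM_3 on the STORE_BB path, and the PHI argument for the
         skip path (edge E) is filled in at the bottom of the outer
         loop, once the last store moved into STORE_BB is known.  */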
      /* Put all masked stores with the same mask to STORE_BB if
         possible.  */
      while (true)
        {
          gimple_stmt_iterator gsi_from;
          gimple *stmt1 = NULL;
          /* Move masked store to STORE_BB.  */
          last_store = last;
          gsi = gsi_for_stmt (last);
          gsi_from = gsi;
          /* Shift GSI to the previous stmt for further traversal.  */
          gsi_prev (&gsi);
          gsi_to = gsi_start_bb (store_bb);
          gsi_move_before (&gsi_from, &gsi_to);
          /* Setup GSI_TO to the non-empty block start.  */
          gsi_to = gsi_start_bb (store_bb);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Move stmt to created bb\n");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
            }
          /* Move all stored value producers if possible.  */
          while (!gsi_end_p (gsi))
            {
              tree lhs;
              imm_use_iterator imm_iter;
              use_operand_p use_p;
              bool res;

              /* Skip debug statements.  */
              if (is_gimple_debug (gsi_stmt (gsi)))
                {
                  gsi_prev (&gsi);
                  continue;
                }
              stmt1 = gsi_stmt (gsi);
              /* Do not consider statements writing to memory or having
                 a volatile operand.  */
              if (gimple_vdef (stmt1)
                  || gimple_has_volatile_ops (stmt1))
                break;
              gsi_from = gsi;
              gsi_prev (&gsi);
              lhs = gimple_get_lhs (stmt1);
              if (!lhs)
                break;

              /* LHS of vectorized stmt must be SSA_NAME.  */
              if (TREE_CODE (lhs) != SSA_NAME)
                break;

              if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
                {
                  /* Remove dead scalar statement.  */
                  if (has_zero_uses (lhs))
                    {
                      gsi_remove (&gsi_from, true);
                      continue;
                    }
                  break;
                }
              /* Check that LHS does not have uses outside of STORE_BB.  */
              res = true;
              FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
                {
                  gimple *use_stmt;
                  use_stmt = USE_STMT (use_p);
                  if (is_gimple_debug (use_stmt))
                    continue;
                  if (gimple_bb (use_stmt) != store_bb)
                    {
                      res = false;
                      break;
                    }
                }
              if (!res)
                break;

              if (gimple_vuse (stmt1)
                  && gimple_vuse (stmt1) != gimple_vuse (last_store))
                break;
              /* Can move STMT1 to STORE_BB.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "Move stmt to created bb\n");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
                }
              gsi_move_before (&gsi_from, &gsi_to);
              /* Shift GSI_TO for further insertion.  */
              gsi_prev (&gsi_to);
            }
          /* Put other masked stores with the same mask to STORE_BB.  */
          if (worklist.is_empty ()
              || gimple_call_arg (worklist.last (), 2) != mask
              || worklist.last () != stmt1)
            break;
          last = worklist.pop ();
        }
      add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
    }
}
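/* Grouping note: the loop above only chains a further MASK_STORE into
   the same STORE_BB when it carries the identical mask SSA name and is
   exactly the statement at which the upward scan stopped
   (worklist.last () == stmt1), i.e. the stores and their producers are
   contiguous; anything else starts a fresh half-diamond on a later
   iteration of the outer while loop.  */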