/* Loop Vectorization
   Copyright (C) 2003-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "diagnostic-core.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "target.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it was manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
        v8hi va, vb, vc;

        for (i=0; i<N/8; i++){
          vb = pb[i];
          vc = pc[i];
          va = vb + vc;
          pa[i] = va;
        }

        The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
        Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMES), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFS whose base is an array DECL
   (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
        The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

        During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
        The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer is used for the vectorization of following
   stmts which use the def of stmt S.  Stmt S is removed if it writes to memory;
   otherwise, we rely on dead code elimination for removing it.

        For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

        Operands that are not SSA_NAMEs are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
        Currently the only target specific information that is used is the
   size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
   Targets that can support different sizes of vectors will, for now, need
   to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".  More
   flexibility will be added in the future.

        Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
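/* For illustration only (not part of the pass itself): the target support
   check described above, for an addition on eight 16-bit elements, is a
   query along the lines of

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       ; -- no target support, the stmt cannot be vectorized

   using the optab/mode pair named in the comment above.  */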
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data elements
   that are operated upon in parallel in a single iteration of the vectorized
   loop.  For example, when vectorizing a loop that operates on 4-byte elements,
   on a target with vector size (VS) of 16 bytes, the VF is set to 4, since 4
   elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated upon
   are of the same size.  Therefore this function currently sets VF according to
   the size of the types operated upon, and fails if there are multiple sizes
   in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
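/* A worked instance of the above (illustration; the concrete numbers are
   assumptions, not taken from the sources): with 4-byte elements and a
   16-byte vector size, VF = 4, so a loop with N = 103 is strip-mined into

        for (i=0; i<100; i+=4)
          a[i:4] = b[i:4] + c[i:4];   -- 25 vector iterations

   leaving 3 scalar iterations (i = 100..102) for an epilogue loop.  */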
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;
  gimple stmt, pattern_stmt = NULL;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool analyze_pattern_stmt = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_determine_vectorization_factor ===\n");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type:  ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
                                 nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;)
        {
          tree vf_vectype;

          if (analyze_pattern_stmt)
            stmt = pattern_stmt;
          else
            stmt = gsi_stmt (si);

          stmt_info = vinfo_for_stmt (stmt);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if ((!STMT_VINFO_RELEVANT_P (stmt_info)
               && !STMT_VINFO_LIVE_P (stmt_info))
              || gimple_clobber_p (stmt))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (pattern_stmt);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "==> examining pattern statement: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                      dump_printf (MSG_NOTE, "\n");
                    }
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            analyze_pattern_stmt = true;

          /* If a pattern statement has def stmts, analyze them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> examining pattern def stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      analyze_pattern_stmt = false;
                    }
                }
              else
                analyze_pattern_stmt = false;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE
              /* MASK_STORE has no lhs, but is ok.  */
              && (!is_gimple_call (stmt)
                  || !gimple_call_internal_p (stmt)
                  || gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
            {
              if (is_gimple_call (stmt))
                {
                  /* Ignore calls with no lhs.  These must be calls to
                     #pragma omp simd functions, and what vectorization factor
                     it really needs can't be determined until
                     vectorizable_simd_clone_call.  */
                  if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: irregular stmt.");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
                                    0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: vector stmt in loop:");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for stmts
                 that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 pattern).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info)
                          || !gsi_end_p (pattern_def_si));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
              if (is_gimple_call (stmt)
                  && gimple_call_internal_p (stmt)
                  && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
                scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
              else
                scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "get vectype for scalar type:  ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
                  dump_printf (MSG_NOTE, "\n");
                }
              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "not vectorized: unsupported "
                                       "data-type ");
                      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                         scalar_type);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }

              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
                  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
                  dump_printf (MSG_NOTE, "\n");
                }
            }

          /* The vectorization factor is according to the smallest
             scalar type (or the largest vector size, but we only
             support one vector size per loop).  */
          scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                       &dummy);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "get vectype for scalar type:  ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
              dump_printf (MSG_NOTE, "\n");
            }
          vf_vectype = get_vectype_for_scalar_type (scalar_type);
          if (!vf_vectype)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: unsupported data-type ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     scalar_type);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if ((GET_MODE_SIZE (TYPE_MODE (vectype))
               != GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: different sized vector "
                                   "types in statement, ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                     vf_vectype);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
              dump_printf (MSG_NOTE, "\n");
            }

          nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;

          if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }
    }

  /* TODO: Analyze cost.  Decide if worth while to vectorize.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
                     vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported data-type\n");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution.  */

static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
                             tree * step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
  basic_block bb;

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "step: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
      dump_printf (MSG_NOTE, ",  init: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
      dump_printf (MSG_NOTE, "\n");
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST
      && (TREE_CODE (step_expr) != SSA_NAME
          || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
              && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
          || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
              && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
                  || !flag_associative_math)))
      && (TREE_CODE (step_expr) != REAL_CST
          || !flag_associative_math))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "step unknown.\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */

static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree init, step;
  auto_vec<gimple, 64> worklist;
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_scalar_cycles ===\n");

  /* First - identify all inductions.  Reduction detection assumes that all the
     inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (virtual_operand_p (def))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn)
        {
          STRIP_NOPS (access_fn);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "Access function of PHI: ");
              dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
              dump_printf (MSG_NOTE, "\n");
            }
          STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
            = evolution_part_in_loop_num (access_fn, loop->num);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
          || (LOOP_VINFO_LOOP (loop_vinfo) != loop
              && TREE_CODE (step) != INTEGER_CST))
        {
          worklist.safe_push (phi);
          continue;
        }

      gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }


  /* Second - identify all reductions and nested cycles.  */
  while (worklist.length () > 0)
    {
      gimple phi = worklist.pop ();
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      gcc_assert (!virtual_operand_p (def)
                  && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                                &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "Detected double reduction.\n");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected vectorizable nested cycle.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_nested_cycle;
                }
              else
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Detected reduction.\n");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_reduction_def;
                  /* Store the reduction cycles for possible vectorization in
                     loop-aware SLP.  */
                  LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
                }
            }
        }
      else
        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Unknown def-use cycle pattern.\n");
    }
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables.  Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also to its
   inner-loop, if it exists.

   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */

static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such an inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the original
        scalar loop, so we can't change the order of computation when
        vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Function vect_get_loop_niters.

   Determine how many times the loop is executed and place it
   in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
   in NUMBER_OF_ITERATIONSM1.

   Return the loop exit condition.  */

static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations,
                      tree *number_of_iterationsm1)
{
  tree niters;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== get_loop_niters ===\n");

  niters = number_of_latch_executions (loop);
  *number_of_iterationsm1 = niters;

  /* We want the number of loop header executions which is the number
     of latch executions plus one.
     ???  For UINT_MAX latch executions this number overflows to zero
     for loops like do { n++; } while (n != 0);  */
  if (niters && !chrec_contains_undetermined (niters))
    niters = fold_build2 (PLUS_EXPR, TREE_TYPE (niters), unshare_expr (niters),
                          build_int_cst (TREE_TYPE (niters), 1));
  *number_of_iterations = niters;

  return get_loop_exit_condition (loop);
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the same
     as reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERSM1 (res) = NULL;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_COST_MODEL_THRESHOLD (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_VINFO_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_LOOP_NEST (res).create (3);
  LOOP_VINFO_DATAREFS (res).create (10);
  LOOP_VINFO_DDRS (res).create (10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res).create (
      PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res).create (
      PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_GROUPED_STORES (res).create (10);
  LOOP_VINFO_REDUCTIONS (res).create (10);
  LOOP_VINFO_REDUCTION_CHAINS (res).create (10);
  LOOP_VINFO_SLP_INSTANCES (res).create (10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;
  LOOP_VINFO_TARGET_COST_DATA (res) = init_cost (loop);
  LOOP_VINFO_PEELING_FOR_GAPS (res) = false;
  LOOP_VINFO_PEELING_FOR_NITER (res) = false;
  LOOP_VINFO_OPERANDS_SWAPPED (res) = false;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  bool swapped;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = clean_stmts ? loop->num_nodes : 0;
  swapped = LOOP_VINFO_OPERANDS_SWAPPED (loop_vinfo);

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);

          /* We may have broken canonical form by moving a constant
             into RHS1 of a commutative op.  Fix such occurrences.  */
          if (swapped && is_gimple_assign (stmt))
            {
              enum tree_code code = gimple_assign_rhs_code (stmt);

              if ((code == PLUS_EXPR
                   || code == POINTER_PLUS_EXPR
                   || code == MULT_EXPR)
                  && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
                swap_ssa_operands (stmt,
                                   gimple_assign_rhs1_ptr (stmt),
                                   gimple_assign_rhs2_ptr (stmt));
            }

          /* Free stmt_vec_info.  */
          free_stmt_vec_info (stmt);
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  vect_destroy_datarefs (loop_vinfo, NULL);
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  LOOP_VINFO_LOOP_NEST (loop_vinfo).release ();
  LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).release ();
  LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).release ();
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, j, instance)
    vect_free_slp_instance (instance);

  LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
  LOOP_VINFO_GROUPED_STORES (loop_vinfo).release ();
  LOOP_VINFO_REDUCTIONS (loop_vinfo).release ();
  LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).release ();

  if (LOOP_VINFO_PEELING_HTAB (loop_vinfo).is_created ())
    LOOP_VINFO_PEELING_HTAB (loop_vinfo).dispose ();

  destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "===== analyze_loop_nest_1 =====\n");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad inner-loop form.\n");
      return NULL;
    }

  return loop_vinfo;
}
/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */

loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL, number_of_iterationsm1 = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_form ===\n");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: empty loop.\n");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple nested loops.\n");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: Bad inner loop.\n");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: inner-loop count not"
                             " invariant.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: control flow in loop.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: unsupported outerloop form.\n");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Considering outer-loop vectorization.\n");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (dump_enabled_p ())
        {
          if (!single_exit (loop))
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: multiple exits.\n");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: too many incoming edges.\n");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop. i.e,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: latch block not empty.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "split exit edge.\n");
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: abnormal loop exit edge.\n");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations,
                                    &number_of_iterationsm1);
  if (!loop_cond)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: complicated exit condition.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations
      || chrec_contains_undetermined (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations cannot be "
                         "computed.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (integer_zerop (number_of_iterations))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: number of iterations = 0.\n");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Symbolic number of iterations is ");
          dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;
  HOST_WIDE_INT max_niter;
  HOST_WIDE_INT estimated_niter;
  int min_profitable_estimate;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_loop_operations ===\n");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  if (slp)
    {
      /* If all the stmts in the loop can be SLPed, we perform only SLP, and
         the vectorization factor of the loop is the unrolling factor required
         by the SLP instances.  If that unrolling factor is 1, we say that we
         perform pure SLP on the loop - cross iteration parallelism is not
         exploited.  */
      for (i = 0; i < nbbs; i++)
        {
          basic_block bb = bbs[i];
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              gcc_assert (stmt_info);
              if ((STMT_VINFO_RELEVANT_P (stmt_info)
                   || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
                  && !PURE_SLP_STMT (stmt_info))
                /* STMT needs both SLP and loop-based vectorization.  */
                only_slp_in_loop = false;
            }
        }

      if (only_slp_in_loop)
        vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
      else
        vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));

      LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Updating vectorization factor to %d\n",
                         vectorization_factor);
    }

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          /* Inner-loop loop-closed exit phi in outer-loop vectorization
             (i.e., a phi in the tail of the outer-loop).  */
          if (! is_loop_header_bb_p (bb))
            {
              /* FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), cause this case
                 requires to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Unsupported loop-closed phi in "
                                     "outer-loop.\n");
                  return false;
                }

              /* If PHI is used in the outer loop, we check that its operand
                 is defined in the inner loop.  */
              if (STMT_VINFO_RELEVANT_P (stmt_info))
                {
                  tree phi_op;
                  gimple op_def_stmt;

                  if (gimple_phi_num_args (phi) != 1)
                    return false;

                  phi_op = PHI_ARG_DEF (phi, 0);
                  if (TREE_CODE (phi_op) != SSA_NAME)
                    return false;

                  op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
                  if (gimple_nop_p (op_def_stmt)
                      || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
                      || !vinfo_for_stmt (op_def_stmt))
                    return false;

                  if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer
                      && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
                        != vect_used_in_outer_by_reduction)
                    return false;
                }

              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: value used after loop.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not vectorized: scalar dependence cycle.\n");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "not vectorized: relevant phi not "
                                   "supported: ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          if (!gimple_clobber_p (stmt)
              && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "All the computation can be taken out of the loop.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: redundant loop. no profit to "
                         "vectorize.\n");
      return false;
    }

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorization_factor = %d, niters = "
                     HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
                     LOOP_VINFO_INT_NITERS (loop_vinfo));

  if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
      || ((max_niter = max_stmt_executions_int (loop)) != -1
          && (unsigned HOST_WIDE_INT) max_niter < vectorization_factor))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: iteration count too small.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: iteration count smaller than "
                         "vectorization factor.\n");
      return false;
    }

  /* Analyze cost.  Decide if worth while to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
                                      &min_profitable_estimate);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vector version will never be "
                         "profitable.\n");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than user specified
     threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization not profitable.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: iteration count smaller than user "
                         "specified loop bound parameter or minimum profitable "
                         "iterations (whichever is more conservative).\n");
      return false;
    }

  if ((estimated_niter = estimated_stmt_executions_int (loop)) != -1
      && ((unsigned HOST_WIDE_INT) estimated_niter
          <= MAX (th, (unsigned)min_profitable_estimate)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: estimated iteration count too "
                         "small.\n");
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "not vectorized: estimated iteration count smaller "
                         "than specified loop bound parameter or minimum "
                         "profitable iterations (whichever is more "
                         "conservative).\n");
      return false;
    }

  return true;
}
/* Function vect_analyze_loop_2.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
  bool ok, slp = false;
  int max_vf = MAX_VECTORIZATION_FACTOR;
  int min_vf = 2;
  unsigned int th;
  unsigned int n_stmts = 0;

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.  Also adjust the minimal
     vectorization factor according to the loads and stores.

     FORNOW: Handle only simple, array references, whose
     alignment can be forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL, &min_vf, &n_stmts);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data references.\n");
      return false;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo, NULL);

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data access.\n");
      return false;
    }

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unexpected pattern.\n");
      return false;
    }

  /* Analyze data dependences between the data-refs in the loop
     and adjust the maximum vectorization factor according to
     the dependences.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
  if (!ok
      || max_vf < min_vf)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't determine vectorization factor.\n");
      return false;
    }
  if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data dependence.\n");
      return false;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return false;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "number of versioning for alias "
                         "run-time tests exceeds %d "
                         "(--param vect-max-version-for-alias-checks)\n",
                         PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
      return false;
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad data alignment.\n");
      return false;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL, n_stmts);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      slp = vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }
  else
    return false;

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo, slp);
  if (!ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bad operation or unsupported loop bound.\n");
      return false;
    }

  /* Decide whether we need to create an epilogue loop to handle
     remaining scalar iterations.  */
  th = ((LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) + 1)
        / LOOP_VINFO_VECT_FACTOR (loop_vinfo))
       * LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
    {
      if (ctz_hwi (LOOP_VINFO_INT_NITERS (loop_vinfo)
                   - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
          < exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
        LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    }
  else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
           || (tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
               < (unsigned)exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
               /* In case of versioning, check if the maximum number of
                  iterations is greater than th.  If they are identical,
                  the epilogue is unnecessary.  */
               && ((!LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)
                    && !LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
                   || (unsigned HOST_WIDE_INT)max_stmt_executions_int
                        (LOOP_VINFO_LOOP (loop_vinfo)) > th)))
    LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;

  /* If an epilogue loop is required make sure we can create one.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
      if (!vect_can_advance_ivs_p (loop_vinfo)
          || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
                                           single_exit (LOOP_VINFO_LOOP
                                                         (loop_vinfo))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not vectorized: can't create required "
                             "epilog loop\n");
          return false;
        }
    }

  return true;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */
loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  unsigned int vector_sizes;

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "===== analyze_loop_nest =====\n");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop already vectorized.\n");
      return NULL;
    }

  while (1)
    {
      /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
      loop_vinfo = vect_analyze_loop_form (loop);
      if (!loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "bad loop form.\n");
          return NULL;
        }

      if (vect_analyze_loop_2 (loop_vinfo))
        {
          LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

          return loop_vinfo;
        }

      destroy_loop_vec_info (loop_vinfo, true);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "***** Re-trying analysis with "
                         "vector size %d\n", current_vector_size);
    }
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector) or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such a tree-code.

   Return FALSE if CODE currently cannot be vectorized as a reduction.  */

static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}
/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
   STMT is printed with a message MSG.  */

static void
report_vect_op (int msg_type, gimple stmt, const char *msg)
{
  dump_printf_loc (msg_type, vect_location, "%s", msg);
  dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
  dump_printf (msg_type, "\n");
}

/* Detect SLP reduction of the form:

   #a1 = phi <a5, a0>
   a2 = operation (a1)
   a3 = operation (a2)
   a4 = operation (a3)
   a5 = operation (a4)

   #a = phi <a5>

   PHI is the reduction phi node (#a1 = phi <a5, a0> above)
   FIRST_STMT is the first reduction stmt in the chain
   (a2 = operation (a1)).

   Return TRUE if a reduction chain was detected.  */

static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  enum tree_code code;
  gimple current_stmt = NULL, loop_use_stmt = NULL, first, next_stmt;
  stmt_vec_info use_stmt_info, current_stmt_info;
  tree lhs;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  int nloop_uses, size = 0, n_out_of_loop_uses;
  bool found = false;

  if (loop != vect_loop)
    return false;

  lhs = PHI_RESULT (phi);
  code = gimple_assign_rhs_code (first_stmt);
  while (1)
    {
      nloop_uses = 0;
      n_out_of_loop_uses = 0;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
        {
          gimple use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          /* Check if we got back to the reduction phi.  */
          if (use_stmt == phi)
            {
              loop_use_stmt = use_stmt;
              found = true;
              break;
            }

          if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
            {
              if (vinfo_for_stmt (use_stmt)
                  && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
                {
                  loop_use_stmt = use_stmt;
                  nloop_uses++;
                }
            }
          else
            n_out_of_loop_uses++;

          /* There can be either a single use in the loop or two uses in
             phi nodes.  */
          if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
            return false;
        }

      if (found)
        break;

      /* We reached a statement with no loop uses.  */
      if (nloop_uses == 0)
        return false;

      /* This is a loop exit phi, and we haven't reached the reduction phi.  */
      if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
        return false;

      if (!is_gimple_assign (loop_use_stmt)
          || code != gimple_assign_rhs_code (loop_use_stmt)
          || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
        return false;

      /* Insert USE_STMT into reduction chain.  */
      use_stmt_info = vinfo_for_stmt (loop_use_stmt);
      if (current_stmt)
        {
          current_stmt_info = vinfo_for_stmt (current_stmt);
          GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
          GROUP_FIRST_ELEMENT (use_stmt_info)
            = GROUP_FIRST_ELEMENT (current_stmt_info);
        }
      else
        GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

      lhs = gimple_assign_lhs (loop_use_stmt);
      current_stmt = loop_use_stmt;
      size++;
    }

  if (!found || loop_use_stmt != phi || size < 2)
    return false;

  /* Swap the operands, if needed, to make the reduction operand be the second
     operand.  */
  lhs = PHI_RESULT (phi);
  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  while (next_stmt)
    {
      if (gimple_assign_rhs2 (next_stmt) == lhs)
        {
          tree op = gimple_assign_rhs1 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              lhs = gimple_assign_lhs (next_stmt);
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              continue;
            }

          return false;
        }
      else
        {
          tree op = gimple_assign_rhs2 (next_stmt);
          gimple def_stmt = NULL;

          if (TREE_CODE (op) == SSA_NAME)
            def_stmt = SSA_NAME_DEF_STMT (op);

          /* Check that the other def is either defined in the loop
             ("vect_internal_def"), or it's an induction (defined by a
             loop-header phi-node).  */
          if (def_stmt
              && gimple_bb (def_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
              && (is_gimple_assign (def_stmt)
                  || is_gimple_call (def_stmt)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                       == vect_induction_def
                  || (gimple_code (def_stmt) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "swapping oprnds: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
                  dump_printf (MSG_NOTE, "\n");
                }

              swap_ssa_operands (next_stmt,
                                 gimple_assign_rhs1_ptr (next_stmt),
                                 gimple_assign_rhs2_ptr (next_stmt));
              update_stmt (next_stmt);

              if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
                LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
            }
          else
            return false;
        }

      lhs = gimple_assign_lhs (next_stmt);
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }

  /* Save the chain for further analysis in SLP detection.  */
  first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
  LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
  GROUP_SIZE (vinfo_for_stmt (first)) = size;

  return true;
}
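
/* For illustration, a hand-unrolled loop such as (an assumed example)

     for (i = 0; i < n; i += 2)
       {
         sum = sum + a[i];      <-- a2 = operation (a1)
         sum = sum + a[i+1];    <-- a3 = operation (a2)
       }

   produces a chain of two PLUS_EXPR statements linked through SSA names;
   the function above records it via GROUP_FIRST_ELEMENT/GROUP_NEXT_ELEMENT
   with GROUP_SIZE == 2 for later SLP analysis.  */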

/* Function vect_is_simple_reduction_1

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   or

   a3 = ...
   loop_header:
     a1 = phi < a0, a2 >
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation
   4. no uses of a1 outside the loop.

   Conditions 1,4 are tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

     a1 = phi < a0, a2 >
     inner loop (def of a3)
     a2 = phi < a3 >

   If MODIFY is true it tries also to rework the code in-place to enable
   detection of more reduction patterns.  For the time being we rewrite
   "res -= RHS" into "res += -RHS" when it seems worthwhile.  */

static gimple
vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
                            bool check_reduction, bool *double_reduc,
                            bool modify)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1 = NULL, def2 = NULL;
  enum tree_code orig_code, code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;

  /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
     otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));

  name = PHI_RESULT (phi);
  /* ??? If there are no uses of the PHI result the inner loop reduction
     won't be detected as possibly double-reduction by vectorizable_reduction
     because that tries to walk the PHI arg from the preheader edge which
     can be constant.  See PR60382.  */
  if (has_zero_uses (name))
    return NULL;
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;

      if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "intermediate value used outside loop.\n");

          return NULL;
        }

      if (vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "reduction used in loop.\n");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "reduction: not ssa_name: ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "reduction: no def_stmt.\n");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (dump_enabled_p ())
        {
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }

  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "reduction used in loop.\n");
          return NULL;
        }
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported phi node definition.\n");

          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (gimple_bb (def1)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1))
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
                            "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }

  code = orig_code = gimple_assign_rhs_code (def_stmt);

  /* We can handle "res -= x[i]", which is non-associative, by simply
     rewriting it into "res += -x[i]".  Avoid changing the gimple
     instruction for the first simple tests and only do this if we're
     allowed to change code at all.  */
  if (code == MINUS_EXPR
      && modify
      && (op1 = gimple_assign_rhs1 (def_stmt))
      && TREE_CODE (op1) == SSA_NAME
      && SSA_NAME_DEF_STMT (op1) == phi)
    code = PLUS_EXPR;

  if (check_reduction
      && (!commutative_tree_code (code) || !associative_tree_code (code)))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: not binary operation: ");

          return NULL;
        }

      op3 = gimple_assign_rhs1 (def_stmt);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = gimple_assign_rhs2 (def_stmt);
      op2 = gimple_assign_rhs3 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: uses not ssa_names: ");

          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                            "reduction: uses not ssa_names: ");

          return NULL;
        }
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "reduction: multiple types: operation type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
          dump_printf (MSG_NOTE, ", operands types: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
                             TREE_TYPE (op1));
          dump_printf (MSG_NOTE, ",");
          dump_generic_expr (MSG_NOTE, TDF_SLIM,
                             TREE_TYPE (op2));
          if (op3)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 TREE_TYPE (op3));
            }
          if (op4)
            {
              dump_printf (MSG_NOTE, ",");
              dump_generic_expr (MSG_NOTE, TDF_SLIM,
                                 TREE_TYPE (op4));
            }
          dump_printf (MSG_NOTE, "\n");
        }

      return NULL;
    }

  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: unsafe fp math optimization: ");
      return NULL;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: unsafe int math optimization: ");
      return NULL;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (dump_enabled_p ())
        report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                        "reduction: unsafe fixed-point math optimization: ");
      return NULL;
    }

  /* If we detected "res -= x[i]" earlier, rewrite it into
     "res += -x[i]" now.  If this turns out to be useless reassoc
     will clean it up again.  */
  if (orig_code == MINUS_EXPR)
    {
      tree rhs = gimple_assign_rhs2 (def_stmt);
      tree negrhs = make_ssa_name (TREE_TYPE (rhs), NULL);
      gimple negate_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, negrhs,
                                                         rhs, NULL);
      gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
      set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
                                                          loop_info, NULL));
      gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
      gimple_assign_set_rhs2 (def_stmt, negrhs);
      gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
      update_stmt (def_stmt);
    }

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
          || !def1 || gimple_nop_p (def1)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def1))
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
                  || is_gimple_call (def1)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                       == vect_induction_def
                  || (gimple_code (def1) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
      return def_stmt;
    }

  if (def1 && def1 == phi
      && (code == COND_EXPR
          || !def2 || gimple_nop_p (def2)
          || !flow_bb_inside_loop_p (loop, gimple_bb (def2))
          || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
              && (is_gimple_assign (def2)
                  || is_gimple_call (def2)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                       == vect_induction_def
                  || (gimple_code (def2) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                           == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (check_reduction)
        {
          /* Swap operands (just for simplicity - so that the rest of the code
             can assume that the reduction variable is always the last (second)
             argument).  */
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt,
                            "detected reduction: need to swap operands: ");

          swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                             gimple_assign_rhs2_ptr (def_stmt));

          if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
            LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
        }
      else
        {
          if (dump_enabled_p ())
            report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }

  /* Try to find SLP reduction chain.  */
  if (check_reduction && vect_is_slp_reduction (loop_info, phi, def_stmt))
    {
      if (dump_enabled_p ())
        report_vect_op (MSG_NOTE, def_stmt,
                        "reduction: detected reduction chain: ");

      return def_stmt;
    }

  if (dump_enabled_p ())
    report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
                    "reduction: unknown pattern: ");

  return NULL;
}

/* Wrapper around vect_is_simple_reduction_1, that won't modify code
   in-place.  Arguments as there.  */

gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
                          bool check_reduction, bool *double_reduc)
{
  return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
                                     double_reduc, false);
}

/* Wrapper around vect_is_simple_reduction_1, which will modify code
   in-place if it enables detection of more reductions.  Arguments
   as there.  */

gimple
vect_force_simple_reduction (loop_vec_info loop_info, gimple phi,
                             bool check_reduction, bool *double_reduc)
{
  return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
                                     double_reduc, true);
}

/* Calculate the cost of one scalar iteration of the loop.  */

static int
vect_get_single_scalar_iteration_cost (loop_vec_info loop_vinfo,
                                       stmt_vector_for_cost *scalar_cost_vec)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes, factor, scalar_single_iter_cost = 0;
  int innerloop_iters, i;

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  /* FORNOW.  */
  innerloop_iters = 1;
  if (loop->inner)
    innerloop_iters = 50; /* FIXME */

  for (i = 0; i < nbbs; i++)
    {
      gimple_stmt_iterator si;
      basic_block bb = bbs[i];

      if (bb->loop_father == loop->inner)
        factor = innerloop_iters;
      else
        factor = 1;

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
            continue;

          /* Skip stmts that are not vectorized inside the loop.  */
          if (stmt_info
              && !STMT_VINFO_RELEVANT_P (stmt_info)
              && (!STMT_VINFO_LIVE_P (stmt_info)
                  || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            continue;

          vect_cost_for_stmt kind;
          if (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
            {
              if (DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
                kind = scalar_load;
              else
                kind = scalar_store;
            }
          else
            kind = scalar_stmt;

          scalar_single_iter_cost
            += record_stmt_cost (scalar_cost_vec, factor, kind,
                                 NULL, 0, vect_prologue);
        }
    }
  return scalar_single_iter_cost;
}
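
/* E.g., for a body "a[i] = b[i] + c[i]" (an assumed example) this records
   two scalar_load entries, one scalar_stmt for the add and one
   scalar_store, each with FACTOR == 1 when the loop has no inner loop.  */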

/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */

int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
                             int *peel_iters_epilogue,
                             stmt_vector_for_cost *scalar_cost_vec,
                             stmt_vector_for_cost *prologue_cost_vec,
                             stmt_vector_for_cost *epilogue_cost_vec)
{
  int retval = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      *peel_iters_epilogue = vf/2;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "cost model: epilogue peel iters set to vf/2 "
                         "because loop iterations are unknown.\n");

      /* If peeled iterations are known but number of scalar loop
         iterations are unknown, count a taken branch per peeled loop.  */
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
                                 NULL, 0, vect_prologue);
      retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
                                 NULL, 0, vect_epilogue);
    }
  else
    {
      int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
      peel_iters_prologue = niters < peel_iters_prologue ?
                            niters : peel_iters_prologue;
      *peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
      /* If we need to peel for gaps, but no peeling is required, we have to
         peel VF iterations.  */
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
        *peel_iters_epilogue = vf;
    }

  stmt_info_for_cost *si;
  int j;
  if (peel_iters_prologue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      retval += record_stmt_cost (prologue_cost_vec,
                                  si->count * peel_iters_prologue,
                                  si->kind, NULL, si->misalign,
                                  vect_prologue);
  if (*peel_iters_epilogue)
    FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
      retval += record_stmt_cost (epilogue_cost_vec,
                                  si->count * *peel_iters_epilogue,
                                  si->kind, NULL, si->misalign,
                                  vect_epilogue);

  return retval;
}
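
/* For illustration, with assumed values niters == 100, vf == 4 and
   peel_iters_prologue == 3, the epilogue gets (100 - 3) % 4 == 1 peeled
   iteration, and the scalar cost vector is charged three times into the
   prologue and once into the epilogue.  */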

/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.  */

static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
                                    int *ret_min_profitable_niters,
                                    int *ret_min_profitable_estimate)
{
  int min_profitable_iters;
  int min_profitable_estimate;
  int peel_iters_prologue;
  int peel_iters_epilogue;
  unsigned vec_inside_cost = 0;
  int vec_outside_cost = 0;
  unsigned vec_prologue_cost = 0;
  unsigned vec_epilogue_cost = 0;
  int scalar_single_iter_cost = 0;
  int scalar_outside_cost = 0;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  /* Cost model disabled.  */
  if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    {
      dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
      *ret_min_profitable_niters = 0;
      *ret_min_profitable_estimate = 0;
      return;
    }

  /* Requires loop versioning tests to handle misalignment.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    {
      /*  FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning to treat misalignment.\n");
    }

  /* Requires loop versioning with alias checks.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /*  FIXME: Make cost depend on complexity of individual check.  */
      unsigned len = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).length ();
      (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
                            vect_prologue);
      dump_printf (MSG_NOTE,
                   "cost model: Adding cost of checks for loop "
                   "versioning aliasing.\n");
    }

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
                          vect_prologue);

  /* Count statements in scalar loop.  Using this as scalar cost for a single
     iteration for now.

     TODO: Add outer loop support.

     TODO: Consider assigning different costs to different scalar
     statements.  */

  auto_vec<stmt_info_for_cost> scalar_cost_vec;
  scalar_single_iter_cost
    = vect_get_single_scalar_iteration_cost (loop_vinfo, &scalar_cost_vec);

  /* Add additional cost for the peeled instructions in prologue and epilogue
     loop.

     FORNOW: If we don't know the value of peel_iters for prologue or epilogue
     at compile-time - we assume it's vf/2 (the worst would be vf-1).

     TODO: Build an expression that represents peel_iters for prologue and
     epilogue to be used in a run-time test.  */

  if (npeel < 0)
    {
      peel_iters_prologue = vf/2;
      dump_printf (MSG_NOTE, "cost model: "
                   "prologue peel iters set to vf/2.\n");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
         unknown.  */
      peel_iters_epilogue = vf/2;
      dump_printf (MSG_NOTE, "cost model: "
                   "epilogue peel iters set to vf/2 because "
                   "peeling for alignment is unknown.\n");

      /* If peeled iterations are unknown, count a taken branch and a not taken
         branch per peeled loop.  Even if scalar loop iterations are known,
         vector iterations are not known since peeled prologue iterations are
         not known.  Hence guards remain the same.  */
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
                            NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
                            NULL, 0, vect_prologue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
                            NULL, 0, vect_epilogue);
      (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
                            NULL, 0, vect_epilogue);
      stmt_info_for_cost *si;
      int j;
      FOR_EACH_VEC_ELT (scalar_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (target_cost_data,
                                si->count * peel_iters_prologue,
                                si->kind, stmt_info, si->misalign,
                                vect_prologue);
          (void) add_stmt_cost (target_cost_data,
                                si->count * peel_iters_epilogue,
                                si->kind, stmt_info, si->misalign,
                                vect_epilogue);
        }
    }
  else
    {
      stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
      stmt_info_for_cost *si;
      int j;
      void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

      prologue_cost_vec.create (2);
      epilogue_cost_vec.create (2);
      peel_iters_prologue = npeel;

      (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
                                          &peel_iters_epilogue,
                                          &scalar_cost_vec,
                                          &prologue_cost_vec,
                                          &epilogue_cost_vec);

      FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_prologue);
        }

      FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_epilogue);
        }

      prologue_cost_vec.release ();
      epilogue_cost_vec.release ();
    }

  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       vector code:
         if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
           jmp to epilogue

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */

  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
          || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
        scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
              + vect_get_stmt_cost (cond_branch_not_taken);
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
        }
    }

  /* Complete the target-specific cost calculations.  */
  finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
     SOC = scalar outside cost for run time cost model check.  */

  if ((scalar_single_iter_cost * vf) > (int) vec_inside_cost)
    {
      if (vec_outside_cost <= 0)
        min_profitable_iters = 1;
      else
        {
          min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * vf
                                  - vec_inside_cost * peel_iters_prologue
                                  - vec_inside_cost * peel_iters_epilogue)
                                 / ((scalar_single_iter_cost * vf)
                                    - vec_inside_cost);

          if ((scalar_single_iter_cost * vf * min_profitable_iters)
              <= (((int) vec_inside_cost * min_profitable_iters)
                  + (((int) vec_outside_cost - scalar_outside_cost) * vf)))
            min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (LOOP_VINFO_LOOP (loop_vinfo)->force_vect)
        warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
                    "did not happen for a simd loop");

      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cost model: the vector iteration cost = %d "
                         "divided by the scalar iteration cost = %d "
                         "is greater than or equal to the "
                         "vectorization factor = %d.\n",
                         vec_inside_cost, scalar_single_iter_cost, vf);
      *ret_min_profitable_niters = -1;
      *ret_min_profitable_estimate = -1;
      return;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
                   vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
                   vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
                   scalar_single_iter_cost);
      dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
                   scalar_outside_cost);
      dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
                   vec_outside_cost);
      dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
                   peel_iters_prologue);
      dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
                   peel_iters_epilogue);
      dump_printf (MSG_NOTE,
                   "  Calculated minimum iters for profitability: %d\n",
                   min_profitable_iters);
      dump_printf (MSG_NOTE, "\n");
    }

  min_profitable_iters =
        min_profitable_iters < vf ? vf : min_profitable_iters;

  /* Because the condition we create is:
     if (niters <= min_profitable_iters)
       then skip the vectorized loop.  */
  min_profitable_iters--;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Runtime profitability threshold = %d\n",
                     min_profitable_iters);

  *ret_min_profitable_niters = min_profitable_iters;

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.

     Non-vectorized variant is SIC * niters and it must win over vector
     variant on the expected loop trip count.  The following condition
     must hold true:
     SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC  */

  if (vec_outside_cost <= 0)
    min_profitable_estimate = 1;
  else
    {
      min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * vf
                                 - vec_inside_cost * peel_iters_prologue
                                 - vec_inside_cost * peel_iters_epilogue)
                                / ((scalar_single_iter_cost * vf)
                                   - vec_inside_cost);
    }
  min_profitable_estimate--;
  min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "  Static estimate profitability threshold = %d\n",
                     min_profitable_estimate);

  *ret_min_profitable_estimate = min_profitable_estimate;
}

/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
                           int ncopies)
{
  int prologue_cost = 0, epilogue_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  gimple stmt, orig_stmt;
  tree reduction_op;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  /* Cost of reduction op inside loop.  */
  unsigned inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                                        stmt_info, 0, vect_body);
  stmt = STMT_VINFO_STMT (stmt_info);

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt)) == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;
    case GIMPLE_TERNARY_RHS:
      reduction_op = gimple_assign_rhs3 (stmt);
      break;
    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (reduction_op));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return false;
    }

  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.  */
  prologue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec,
                                  stmt_info, 0, vect_prologue);

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one statement.
     Also requires scalar extract.  */

  if (!nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
        {
          epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                          stmt_info, 0, vect_epilogue);
          epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar,
                                          stmt_info, 0, vect_epilogue);
        }
      else
        {
          int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          tree bitsize =
            TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
          int element_bitsize = tree_to_uhwi (bitsize);
          int nelements = vec_size_in_bits / element_bitsize;

          optab = optab_for_tree_code (code, vectype, optab_default);

          /* We have a whole vector shift available.  */
          if (VECTOR_MODE_P (mode)
              && optab_handler (optab, mode) != CODE_FOR_nothing
              && optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
            {
              /* Final reduction via vector shifts and the reduction operator.
                 Also requires scalar extract.  */
              epilogue_cost += add_stmt_cost (target_cost_data,
                                              exact_log2 (nelements) * 2,
                                              vector_stmt, stmt_info, 0,
                                              vect_epilogue);
              epilogue_cost += add_stmt_cost (target_cost_data, 1,
                                              vec_to_scalar, stmt_info, 0,
                                              vect_epilogue);
            }
          else
            /* Use extracts and reduction op for final reduction.  For N
               elements, we have N extracts and N-1 reduction ops.  */
            epilogue_cost += add_stmt_cost (target_cost_data,
                                            nelements + nelements - 1,
                                            vector_stmt, stmt_info, 0,
                                            vect_epilogue);
        }
    }

  if (dump_enabled_p ())
    dump_printf (MSG_NOTE,
                 "vect_model_reduction_cost: inside_cost = %d, "
                 "prologue_cost = %d, epilogue_cost = %d.\n", inside_cost,
                 prologue_cost, epilogue_cost);

  return true;
}
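
/* E.g., for a four-element vector reduction (assumed numbers): with a
   whole-vector shift available the epilogue costs exact_log2 (4) * 2 == 4
   vector_stmt operations plus one vec_to_scalar extract; without it, the
   extract scheme costs 4 + 4 - 1 == 7 statements.  */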

/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  unsigned inside_cost, prologue_cost;

  /* loop cost for vec_loop.  */
  inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
                               stmt_info, 0, vect_body);

  /* prologue cost for vec_init and vec_step.  */
  prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
                                 stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_induction_cost: inside_cost = %d, "
                     "prologue_cost = %d.\n", inside_cost, prologue_cost);
}
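
/* E.g., with ncopies == 2 (an assumed value) this accounts two vector_stmt
   copies in the loop body plus two scalar_to_vec prologue statements, one
   for materializing vec_init and one for vec_step.  */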

/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the initial value of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
   the induction variable.  E.g., for an iv with IV_PHI='X' and
   evolution S, for a vector of 4 units, we want to return:
   [X, X + S, X + 2*S, X + 3*S].  */

static tree
get_initial_def_for_induction (gimple iv_phi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype;
  int nunits;
  edge pe = loop_preheader_edge (loop);
  struct loop *iv_loop;
  basic_block new_bb;
  tree new_vec, vec_init, vec_step, t;
  tree new_var;
  tree new_name;
  gimple init_stmt, induction_phi, new_stmt;
  tree induc_def, vec_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int i;
  int ncopies;
  tree expr;
  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
  bool nested_in_vect_loop = false;
  gimple_seq stmts = NULL;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (iv_phi);
  tree stepvectype;
  tree resvectype;

  /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
  if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);

  step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
  gcc_assert (step_expr != NULL_TREE);

  pe = loop_preheader_edge (iv_loop);
  init_expr = PHI_ARG_DEF_FROM_EDGE (iv_phi,
                                     loop_preheader_edge (iv_loop));

  vectype = get_vectype_for_scalar_type (TREE_TYPE (init_expr));
  resvectype = get_vectype_for_scalar_type (TREE_TYPE (PHI_RESULT (iv_phi)));
  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  ncopies = vf / nunits;

  gcc_assert (phi_info);
  gcc_assert (ncopies >= 1);

  /* Convert the step to the desired type.  */
  step_expr = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
                                                  step_expr),
                                    &stmts, true, NULL_TREE);
  if (stmts)
    {
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had already
         been created during vectorization of previous stmts.  We obtain it
         from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      vec_init = vect_get_vec_def_for_operand (init_expr, iv_phi, NULL);
      /* If the initial value is not of proper type, convert it.  */
      if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
        {
          new_stmt = gimple_build_assign_with_ops
              (VIEW_CONVERT_EXPR,
               vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"),
               build1 (VIEW_CONVERT_EXPR, vectype, vec_init), NULL_TREE);
          vec_init = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_init);
          new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
                                                 new_stmt);
          gcc_assert (!new_bb);
          set_vinfo_for_stmt (new_stmt,
                              new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
        }
    }
  else
    {
      vec<constructor_elt, va_gc> *v;

      /* iv_loop is the loop to be vectorized.  Create:
         vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      new_var = vect_get_new_vect_var (TREE_TYPE (vectype),
                                       vect_scalar_var, "var_");
      new_name = force_gimple_operand (fold_convert (TREE_TYPE (vectype),
                                                     init_expr),
                                       &stmts, false, new_var);
      if (stmts)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }

      vec_alloc (v, nunits);
      bool constant_p = is_gimple_min_invariant (new_name);
      CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
      for (i = 1; i < nunits; i++)
        {
          /* Create: new_name_i = new_name + step_expr  */
          new_name = fold_build2 (PLUS_EXPR, TREE_TYPE (new_name),
                                  new_name, step_expr);
          if (!is_gimple_min_invariant (new_name))
            {
              init_stmt = gimple_build_assign (new_var, new_name);
              new_name = make_ssa_name (new_var, init_stmt);
              gimple_assign_set_lhs (init_stmt, new_name);
              new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
              gcc_assert (!new_bb);
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "created new init_stmt: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0);
                  dump_printf (MSG_NOTE, "\n");
                }
              constant_p = false;
            }
          CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, new_name);
        }
      /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1]  */
      if (constant_p)
        new_vec = build_vector_from_ctor (vectype, v);
      else
        new_vec = build_constructor (vectype, v);
      vec_init = vect_init_vector (iv_phi, new_vec, vectype, NULL);
    }


  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized.  Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized.  Generate:
         vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
        {
          expr = build_int_cst (integer_type_node, vf);
          expr = fold_convert (TREE_TYPE (step_expr), expr);
        }
      else
        expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
        new_name = vect_init_vector (iv_phi, new_name,
                                     TREE_TYPE (step_expr), NULL);
    }

  t = unshare_expr (new_name);
  gcc_assert (CONSTANT_CLASS_P (new_name)
              || TREE_CODE (new_name) == SSA_NAME);
  stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
  gcc_assert (stepvectype);
  new_vec = build_vector_from_val (stepvectype, t);
  vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);


  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
         vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
                      new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                           induc_def, vec_step);
  vec_def = make_ssa_name (vec_dest, new_stmt);
  gimple_assign_set_lhs (new_stmt, vec_def);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
                                                   NULL));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
               UNKNOWN_LOCATION);


  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
        {
          expr = build_int_cst (integer_type_node, nunits);
          expr = fold_convert (TREE_TYPE (step_expr), expr);
        }
      else
        expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
      if (TREE_CODE (step_expr) == SSA_NAME)
        new_name = vect_init_vector (iv_phi, new_name,
                                     TREE_TYPE (step_expr), NULL);
      t = unshare_expr (new_name);
      gcc_assert (CONSTANT_CLASS_P (new_name)
                  || TREE_CODE (new_name) == SSA_NAME);
      new_vec = build_vector_from_val (stepvectype, t);
      vec_step = vect_init_vector (iv_phi, new_vec, stepvectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
        {
          /* vec_i = vec_prev + vec_step  */
          new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                                   vec_def, vec_step);
          vec_def = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_def);

          gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
          if (!useless_type_conversion_p (resvectype, vectype))
            {
              new_stmt = gimple_build_assign_with_ops
                  (VIEW_CONVERT_EXPR,
                   vect_get_new_vect_var (resvectype, vect_simple_var,
                                          "vec_iv_"),
                   build1 (VIEW_CONVERT_EXPR, resvectype,
                           gimple_assign_lhs (new_stmt)), NULL_TREE);
              gimple_assign_set_lhs (new_stmt,
                                     make_ssa_name
                                       (gimple_assign_lhs (new_stmt), new_stmt));
              gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
            }
          set_vinfo_for_stmt (new_stmt,
                              new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
          STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
          prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
        }
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          gimple use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
            {
              exit_phi = use_stmt;
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
          /* FORNOW.  Currently not supporting the case that an inner-loop
             induction is not used in the outer-loop (i.e. only outside the
             outer-loop).  */
          gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
                      && !STMT_VINFO_LIVE_P (stmt_vinfo));

          STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "vector of inductions after inner-loop:");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }
        }
    }


  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "transform induction: created def-use cycle: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
      dump_printf (MSG_NOTE, "\n");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                        SSA_NAME_DEF_STMT (vec_def), 0);
      dump_printf (MSG_NOTE, "\n");
    }

  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
  if (!useless_type_conversion_p (resvectype, vectype))
    {
      new_stmt = gimple_build_assign_with_ops
         (VIEW_CONVERT_EXPR,
          vect_get_new_vect_var (resvectype, vect_simple_var, "vec_iv_"),
          build1 (VIEW_CONVERT_EXPR, resvectype, induc_def), NULL_TREE);
      induc_def = make_ssa_name (gimple_assign_lhs (new_stmt), new_stmt);
      gimple_assign_set_lhs (new_stmt, induc_def);
      si = gsi_after_labels (bb);
      gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
      set_vinfo_for_stmt (new_stmt,
                          new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
      STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_stmt))
        = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (induction_phi));
    }

  return induc_def;
}

/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i=0;i<n;i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */

tree
get_initial_def_for_reduction (gimple stmt, tree init_val,
                               tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  int nunits;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  tree *elts;
  int i;
  bool nested_in_vect_loop = false;
  tree init_value;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple def_stmt = NULL;

  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
              || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
          == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  if (TREE_CONSTANT (init_val))
    {
      if (SCALAR_FLOAT_TYPE_P (scalar_type))
        init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
      else
        init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
    }
  else
    init_value = init_val;

  switch (code)
    {
      case WIDEN_SUM_EXPR:
      case DOT_PROD_EXPR:
      case PLUS_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case MULT_EXPR:
      case BIT_AND_EXPR:
        /* ADJUSTMENT_DEF is NULL when called from
           vect_create_epilog_for_reduction to vectorize double reduction.  */
        if (adjustment_def)
          {
            if (nested_in_vect_loop)
              *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
                                                              NULL);
            else
              *adjustment_def = init_val;
          }

        if (code == MULT_EXPR)
          {
            real_init_val = dconst1;
            int_init_val = 1;
          }

        if (code == BIT_AND_EXPR)
          int_init_val = 1;

        if (SCALAR_FLOAT_TYPE_P (scalar_type))
          def_for_init = build_real (scalar_type, real_init_val);
        else
          def_for_init = build_int_cst (scalar_type, int_init_val);

        /* Create a vector of '0' or '1' except the first element.  */
        elts = XALLOCAVEC (tree, nunits);
        for (i = nunits - 2; i >= 0; --i)
          elts[i + 1] = def_for_init;

        /* Option1: the first element is '0' or '1' as well.  */
        if (adjustment_def)
          {
            elts[0] = def_for_init;
            init_def = build_vector (vectype, elts);
            break;
          }

        /* Option2: the first element is INIT_VAL.  */
        elts[0] = init_val;
        if (TREE_CONSTANT (init_val))
          init_def = build_vector (vectype, elts);
        else
          {
            vec<constructor_elt, va_gc> *v;
            vec_alloc (v, nunits);
            CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init_val);
            for (i = 1; i < nunits; ++i)
              CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[i]);
            init_def = build_constructor (vectype, v);
          }

        break;

      case MIN_EXPR:
      case MAX_EXPR:
      case COND_EXPR:
        if (adjustment_def)
          {
            *adjustment_def = NULL_TREE;
            init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
            break;
          }

        init_def = build_vector_from_val (vectype, init_value);
        break;

      default:
        gcc_unreachable ();
    }

  return init_def;
}
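
/* E.g., for a MULT_EXPR reduction with init_val == 5 and four units
   (assumed values), Option1 yields {1, 1, 1, 1} with the caller adjusting
   the final product by 5, while Option2 would yield {5, 1, 1, 1}
   directly.  */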
3805 /* Function vect_create_epilog_for_reduction
3807 Create code at the loop-epilog to finalize the result of a reduction
3810 VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
3811 reduction statements.
3812 STMT is the scalar reduction stmt that is being vectorized.
3813 NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
3814 number of elements that we can fit in a vectype (nunits). In this case
3815 we have to generate more than one vector stmt - i.e - we need to "unroll"
3816 the vector stmt by a factor VF/nunits. For more details see documentation
3817 in vectorizable_operation.
3818 REDUC_CODE is the tree-code for the epilog reduction.
3819 REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
3821 REDUC_INDEX is the index of the operand in the right hand side of the
3822 statement that is defined by REDUCTION_PHI.
3823 DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
3824 SLP_NODE is an SLP node containing a group of reduction statements. The
3825 first one in this group is STMT.
3828 1. Creates the reduction def-use cycles: sets the arguments for
3830 The loop-entry argument is the vectorized initial-value of the reduction.
3831 The loop-latch argument is taken from VECT_DEFS - the vector of partial
3833 2. "Reduces" each vector of partial results VECT_DEFS into a single result,
3834 by applying the operation specified by REDUC_CODE if available, or by
3835 other means (whole-vector shifts or a scalar loop).
3836 The function also creates a new phi node at the loop exit to preserve
3837 loop-closed form, as illustrated below.
3839 The flow at the entry to this function:
3842 vec_def = phi <null, null> # REDUCTION_PHI
3843 VECT_DEF = vector_stmt # vectorized form of STMT
3844 s_loop = scalar_stmt # (scalar) STMT
3846 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3850 The above is transformed by this function into:
3853 vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
3854 VECT_DEF = vector_stmt # vectorized form of STMT
3855 s_loop = scalar_stmt # (scalar) STMT
3857 s_out0 = phi <s_loop> # (scalar) EXIT_PHI
3858 v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
3859 v_out2 = reduce <v_out1>
3860 s_out3 = extract_field <v_out2, 0>
3861 s_out4 = adjust_result <s_out3>
static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
                                  int ncopies, enum tree_code reduc_code,
                                  vec<gimple> reduction_phis,
                                  int reduc_index, bool double_reduc,
                                  slp_tree slp_node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple new_phi = NULL, phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
  gimple epilog_stmt = NULL;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  gimple exit_phi;
  tree bitsize, bitpos;
  tree adjustment_def = NULL;
  tree vec_initial_def = NULL;
  tree reduction_op, expr, def;
  tree orig_name, scalar_result;
  imm_use_iterator imm_iter, phi_imm_iter;
  use_operand_p use_p, phi_use_p;
  bool extract_scalar_result = false;
  gimple use_stmt, orig_stmt, reduction_phi = NULL;
  bool nested_in_vect_loop = false;
  auto_vec<gimple> new_phis;
  auto_vec<gimple> inner_phis;
  enum vect_def_type dt = vect_unknown_def_type;
  int j, i;
  auto_vec<tree> scalar_results;
  unsigned int group_size = 1, k, ratio;
  auto_vec<tree> vec_initial_defs;
  auto_vec<gimple> phis;
  bool slp_reduc = false;
  tree new_phi_result;
  gimple inner_phi = NULL;

  if (slp_node)
    group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_in_vect_loop = true;
      gcc_assert (!slp_node);
    }

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
                  == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
      break;
    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_BINARY_RHS:
      reduction_op = reduc_index ?
                     gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
      break;
    case GIMPLE_TERNARY_RHS:
      reduction_op = gimple_op (stmt, reduc_index + 1);
      break;
    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);
  /* 1. Create the reduction def-use cycle:
     Set the arguments of REDUCTION_PHIS, i.e., transform

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT

     into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT

     (in case of SLP, do it for all the phis).  */

  /* Get the loop-entry arguments.  */
  if (slp_node)
    vect_get_vec_defs (reduction_op, NULL_TREE, stmt, &vec_initial_defs,
                       NULL, slp_node, reduc_index);
  else
    {
      vec_initial_defs.create (1);
      /* For the case of reduction, vect_get_vec_def_for_operand returns
         the scalar def before the loop, that defines the initial value
         of the reduction variable.  */
      vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
                                                      &adjustment_def);
      vec_initial_defs.quick_push (vec_initial_def);
    }

  /* Set phi nodes arguments.  */
  FOR_EACH_VEC_ELT (reduction_phis, i, phi)
    {
      tree vec_init_def, def;
      gimple_seq stmts;
      vec_init_def = force_gimple_operand (vec_initial_defs[i], &stmts,
                                           true, NULL_TREE);
      gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
      def = vect_defs[i];
      for (j = 0; j < ncopies; j++)
        {
          /* Set the loop-entry arg of the reduction-phi.  */
          add_phi_arg (phi, vec_init_def, loop_preheader_edge (loop),
                       UNKNOWN_LOCATION);

          /* Set the loop-latch arg for the reduction-phi.  */
          if (j > 0)
            def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);

          add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "transform reduction: created def-use cycle: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def),
                                0);
              dump_printf (MSG_NOTE, "\n");
            }

          phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
        }
    }
  /* 2. Create epilog code.
        The reduction epilog code operates across the elements of the vector
        of partial results computed by the vectorized loop.
        The reduction epilog code consists of:

        step 1: compute the scalar result in a vector (v_out2)
        step 2: extract the scalar result (s_out3) from the vector (v_out2)
        step 3: adjust the scalar result (s_out3) if needed.

        Step 1 can be accomplished using one of the following three schemes:
          (scheme 1) using reduc_code, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop.  In this case steps 1+2 above are
                     combined.

        The overall epilog code looks like this:

          s_out0 = phi <s_loop>                 # original EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>              # step 1
          s_out3 = extract_field <v_out2, 0>    # step 2
          s_out4 = adjust_result <s_out3>       # step 3

          (step 3 is optional, and steps 1 and 2 may be combined).
          Lastly, the uses of s_out0 are replaced by s_out4.  */
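  /* For illustration only (a sketch, not taken from the original sources;
     the exact element order depends on the target's whole-vector-shift
     semantics): with a V4SI partial result v_out1 = {a0, a1, a2, a3},
     scheme 2 performs log2(4) rounds:

         t1 = vec_shift <v_out1, 64>      ; {a2, a3, _, _}
         t2 = vop <v_out1, t1>            ; {a0+a2, a1+a3, _, _}
         t3 = vec_shift <t2, 32>          ; {a1+a3, _, _, _}
         v_out2 = vop <t2, t3>            ; {a0+a1+a2+a3, _, _, _}

     after which step 2 extracts element 0 of v_out2.  */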
  /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
         v_out1 = phi <VECT_DEF>
         Store them in NEW_PHIS.  */

  exit_bb = single_exit (loop)->dest;
  prev_phi_info = NULL;
  new_phis.create (vect_defs.length ());
  FOR_EACH_VEC_ELT (vect_defs, i, def)
    {
      for (j = 0; j < ncopies; j++)
        {
          tree new_def = copy_ssa_name (def, NULL);
          phi = create_phi_node (new_def, exit_bb);
          set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
          if (j == 0)
            new_phis.quick_push (phi);
          else
            {
              def = vect_get_vec_def_for_stmt_copy (dt, def);
              STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
            }

          SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
          prev_phi_info = vinfo_for_stmt (phi);
        }
    }
  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  Create exit phis for the outer loop.  */
  if (double_reduc)
    {
      loop = outer_loop;
      exit_bb = single_exit (loop)->dest;
      inner_phis.create (vect_defs.length ());
      FOR_EACH_VEC_ELT (new_phis, i, phi)
        {
          tree new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
          gimple outer_phi = create_phi_node (new_result, exit_bb);
          SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                           PHI_RESULT (phi));
          set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                            loop_vinfo, NULL));
          inner_phis.quick_push (phi);
          new_phis[i] = outer_phi;
          prev_phi_info = vinfo_for_stmt (outer_phi);
          while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
            {
              phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
              new_result = copy_ssa_name (PHI_RESULT (phi), NULL);
              outer_phi = create_phi_node (new_result, exit_bb);
              SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
                               PHI_RESULT (phi));
              set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
                                                                loop_vinfo,
                                                                NULL));
              STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
              prev_phi_info = vinfo_for_stmt (outer_phi);
            }
        }
    }

  exit_gsi = gsi_after_labels (exit_bb);
  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_code is not available) and in the final adjustment
         code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;
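  /* A worked example (illustration, not from the original sources): for
     s -= a[i] with initial value s0 and four lanes, the loop computes
     v = {s0,0,0,0} - {a0,a1,a2,a3} - {a4,a5,a6,a7} - ..., so each lane
     holds the negated sum of its elements (plus s0 in lane 0).  Adding the
     lanes with PLUS_EXPR yields s0 - sum(a[]); subtracting them instead
     would be wrong, which is why CODE is switched to PLUS_EXPR here.  */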
  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  scalar_results.create (group_size);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be used
     in the vectorized outer-loop, or reduced to a scalar result at the end of
     the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;

  /* SLP reduction without reduction chain, e.g.,
     for (i = 0; i < n; i++)
       {
         a1 = operation (a1)
         b1 = operation (b1)
       }
     a2 = operation (a1)
     b2 = operation (b1)  */
  slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
  /* In case of reduction chain, e.g.,
     for (i = 0; i < n; i++)
       {
         a1 = operation (a0)
         a2 = operation (a1)
         a3 = operation (a2)
       }

     we may end up with more than one vector result.  Here we reduce them to
     one vector.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      tree first_vect = PHI_RESULT (new_phis[0]);
      tree tmp;
      gimple new_vec_stmt = NULL;

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      for (k = 1; k < new_phis.length (); k++)
        {
          gimple next_phi = new_phis[k];
          tree second_vect = PHI_RESULT (next_phi);

          tmp = build2 (code, vectype, first_vect, second_vect);
          new_vec_stmt = gimple_build_assign (vec_dest, tmp);
          first_vect = make_ssa_name (vec_dest, new_vec_stmt);
          gimple_assign_set_lhs (new_vec_stmt, first_vect);
          gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
        }

      new_phi_result = first_vect;
      if (new_vec_stmt)
        {
          new_phis.truncate (0);
          new_phis.safe_push (new_vec_stmt);
        }
    }
  else
    new_phi_result = PHI_RESULT (new_phis[0]);
  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  In SLP we simply need to extract all the elements from the
         vector (without reducing them), so we use scalar shifts.  */
  if (reduc_code != ERROR_MARK && !slp_reduc)
    {
      tree tmp;

      /*** Case 1:  Create:
           v_out2 = reduc_expr <v_out1>  */

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Reduce using direct vector reduction.\n");

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      tmp = build1 (reduc_code, vectype, new_phi_result);
      epilog_stmt = gimple_build_assign (vec_dest, tmp);
      new_temp = make_ssa_name (vec_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      extract_scalar_result = true;
    }
  else
    {
      enum tree_code shift_code = ERROR_MARK;
      bool have_whole_vector_shift = true;
      int bit_offset;
      int element_bitsize = tree_to_uhwi (bitsize);
      int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
      tree vec_temp;

      if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
        shift_code = VEC_RSHIFT_EXPR;
      else
        have_whole_vector_shift = false;

      /* Regardless of whether we have a whole vector shift, if we're
         emulating the operation via tree-vect-generic, we don't want
         to use it.  Only the first round of the reduction is likely
         to still be profitable via emulation.  */
      /* ??? It might be better to emit a reduction tree code here, so that
         tree-vect-generic can expand the first round via bit tricks.  */
      if (!VECTOR_MODE_P (mode))
        have_whole_vector_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype, optab_default);
          if (optab_handler (optab, mode) == CODE_FOR_nothing)
            have_whole_vector_shift = false;
        }

      if (have_whole_vector_shift && !slp_reduc)
        {
          /*** Case 2: Create:
             for (offset = VS/2; offset >= element_size; offset/=2)
                {
                  Create:  va' = vec_shift <va, offset>
                  Create:  va = vop <va, va'>
                }  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using vector shifts\n");

          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_temp = new_phi_result;
          for (bit_offset = vec_size_in_bits/2;
               bit_offset >= element_bitsize;
               bit_offset /= 2)
            {
              tree bitpos = size_int (bit_offset);

              epilog_stmt = gimple_build_assign_with_ops (shift_code,
                                                          vec_dest, new_temp,
                                                          bitpos);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
                                                          new_name, new_temp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          extract_scalar_result = true;
        }
      else
        {
          /*** Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size;)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>  // For non SLP cases
               }  */

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "Reduce using scalar code.\n");

          vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
          FOR_EACH_VEC_ELT (new_phis, i, new_phi)
            {
              tree rhs;

              if (gimple_code (new_phi) == GIMPLE_PHI)
                vec_temp = PHI_RESULT (new_phi);
              else
                vec_temp = gimple_assign_lhs (new_phi);
              rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                            bitsize_zero_node);
              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              /* In SLP we don't need to apply reduction operation, so we just
                 collect s' values in SCALAR_RESULTS.  */
              if (slp_reduc)
                scalar_results.safe_push (new_temp);

              for (bit_offset = element_bitsize;
                   bit_offset < vec_size_in_bits;
                   bit_offset += element_bitsize)
                {
                  tree bitpos = bitsize_int (bit_offset);
                  tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                     bitsize, bitpos);

                  epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
                  new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
                  gimple_assign_set_lhs (epilog_stmt, new_name);
                  gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

                  if (slp_reduc)
                    {
                      /* In SLP we don't need to apply reduction operation, so
                         we just collect s' values in SCALAR_RESULTS.  */
                      new_temp = new_name;
                      scalar_results.safe_push (new_name);
                    }
                  else
                    {
                      epilog_stmt = gimple_build_assign_with_ops (code,
                                                                  new_scalar_dest,
                                                                  new_name,
                                                                  new_temp);
                      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
                      gimple_assign_set_lhs (epilog_stmt, new_temp);
                      gsi_insert_before (&exit_gsi, epilog_stmt,
                                         GSI_SAME_STMT);
                    }
                }
            }

          /* The only case where we need to reduce scalar results in SLP, is
             unrolling.  If the size of SCALAR_RESULTS is greater than
             GROUP_SIZE, we reduce them combining elements modulo
             GROUP_SIZE.  */
          if (slp_reduc)
            {
              tree res, first_res, new_res;
              gimple new_stmt;

              /* Reduce multiple scalar results in case of SLP unrolling.  */
              for (j = group_size; scalar_results.iterate (j, &res);
                   j++)
                {
                  first_res = scalar_results[j % group_size];
                  new_stmt = gimple_build_assign_with_ops (code,
                                                           new_scalar_dest,
                                                           first_res, res);
                  new_res = make_ssa_name (new_scalar_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_res);
                  gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
                  scalar_results[j % group_size] = new_res;
                }
            }
          else
            /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
            scalar_results.safe_push (new_temp);

          extract_scalar_result = false;
        }
    }
  /* 2.4  Extract the final scalar result.  Create:
          s_out3 = extract_field <v_out2, bitpos>  */

  if (extract_scalar_result)
    {
      tree rhs;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "extract scalar result\n");

      if (BYTES_BIG_ENDIAN)
        bitpos = size_binop (MULT_EXPR,
                             bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
                             TYPE_SIZE (scalar_type));
      else
        bitpos = bitsize_zero_node;

      rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
      epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      scalar_results.safe_push (new_temp);
    }
vect_finalize_reduction:

  if (double_reduc)
    loop = loop->inner;

  /* 2.5 Adjust the final result by the initial value of the reduction
         variable. (When such adjustment is not needed, then
         'adjustment_def' is zero).  For example, if code is PLUS we create:
         new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      gcc_assert (!slp_reduc);
      if (nested_in_vect_loop)
        {
          new_phi = new_phis[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          new_temp = scalar_results[0];
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
      if (nested_in_vect_loop)
        {
          set_vinfo_for_stmt (epilog_stmt,
                              new_stmt_vec_info (epilog_stmt, loop_vinfo,
                                                 NULL));
          STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
                STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));

          if (!double_reduc)
            scalar_results.quick_push (new_temp);
          else
            scalar_results[0] = new_temp;
        }
      else
        scalar_results[0] = new_temp;

      new_phis[0] = epilog_stmt;
    }
  /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
          phis with new adjusted scalar results, i.e., replace use <s_out0>
          with use <s_out4>.

     Transform:
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out0>
          use <s_out0>

     into:

        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */
  /* In SLP reduction chain we reduce vector results into one vector if
     necessary, hence we set here GROUP_SIZE to 1.  SCALAR_DEST is the LHS of
     the last stmt in the reduction chain, since we are looking for the loop
     exit phi node.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      scalar_dest = gimple_assign_lhs (
                        SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
      group_size = 1;
    }

  /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
     case that GROUP_SIZE is greater than vectorization factor).  Therefore, we
     need to match SCALAR_RESULTS with corresponding statements.  The first
     (GROUP_SIZE / number of new vector stmts) scalar results correspond to
     the first vector stmt, etc.
     (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
  if (group_size > new_phis.length ())
    {
      ratio = group_size / new_phis.length ();
      gcc_assert (!(group_size % new_phis.length ()));
    }
  else
    ratio = 1;
  for (k = 0; k < group_size; k++)
    {
      if (k % ratio == 0)
        {
          epilog_stmt = new_phis[k / ratio];
          reduction_phi = reduction_phis[k / ratio];
          if (double_reduc)
            inner_phi = inner_phis[k / ratio];
        }

      if (slp_reduc)
        {
          gimple current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];

          orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
          /* SLP statements can't participate in patterns.  */
          gcc_assert (!orig_stmt);
          scalar_dest = gimple_assign_lhs (current_stmt);
        }

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate uses
         - one at the latch block, and one at the loop exit).  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
            && !is_gimple_debug (USE_STMT (use_p)))
          phis.safe_push (USE_STMT (use_p));

      /* While we expect to have found an exit_phi because of loop-closed-ssa
         form we can end up without one if the scalar cycle is dead.  */

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          if (outer_loop)
            {
              stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
              gimple vect_phi;

              /* FORNOW. Currently not supporting the case that an inner-loop
                 reduction is not used in the outer-loop (but only outside the
                 outer-loop), unless it is double reduction.  */
              gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                           && !STMT_VINFO_LIVE_P (exit_phi_vinfo))
                          || double_reduc);

              if (double_reduc)
                STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
              else
                STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
              if (!double_reduc
                  || STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
                      != vect_double_reduction_def)
                continue;

              /* Handle double reduction:

                 stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
                 stmt2:   s3 = phi <s1, s4>  - (regular) reduc phi (inner loop)
                 stmt3:   s4 = use (s3)     - (regular) reduc stmt (inner loop)
                 stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

                 At that point the regular reduction (stmt2 and stmt3) is
                 already vectorized, as well as the exit phi node, stmt4.
                 Here we vectorize the phi node of double reduction, stmt1, and
                 update all relevant statements.  */
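              /* For illustration (an example, not from the original
                 sources), such a nest typically comes from source like

                     s = s0;
                     for (i = 0; i < N; i++)       <- outer loop
                       for (j = 0; j < M; j++)     <- inner loop
                         s += a[i][j];
                     ... = s;                      <- only use of s

                 where the accumulator s corresponds to s1/s2 above: it is a
                 reduction in the inner loop whose result is used only after
                 the outer loop.  */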
              /* Go through all the uses of s2 to find double reduction phi
                 node, i.e., stmt1 above.  */
              orig_name = PHI_RESULT (exit_phi);
              FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
                {
                  stmt_vec_info use_stmt_vinfo;
                  stmt_vec_info new_phi_vinfo;
                  tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
                  basic_block bb = gimple_bb (use_stmt);
                  gimple use;

                  /* Check that USE_STMT is really double reduction phi
                     node.  */
                  if (gimple_code (use_stmt) != GIMPLE_PHI
                      || gimple_phi_num_args (use_stmt) != 2
                      || bb->loop_father != outer_loop)
                    continue;
                  use_stmt_vinfo = vinfo_for_stmt (use_stmt);
                  if (!use_stmt_vinfo
                      || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
                          != vect_double_reduction_def)
                    continue;

                  /* Create vector phi node for double reduction:
                     vs1 = phi <vs0, vs2>
                     vs1 was created previously in this function by a call to
                       vect_get_vec_def_for_operand and is stored in
                       vec_initial_def;
                     vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
                     vs0 is created here.  */

                  /* Create vector phi node.  */
                  vect_phi = create_phi_node (vec_initial_def, bb);
                  new_phi_vinfo = new_stmt_vec_info (vect_phi,
                                    loop_vec_info_for_loop (outer_loop), NULL);
                  set_vinfo_for_stmt (vect_phi, new_phi_vinfo);

                  /* Create vs0 - initial def of the double reduction phi.  */
                  preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
                                             loop_preheader_edge (outer_loop));
                  init_def = get_initial_def_for_reduction (stmt,
                                                            preheader_arg,
                                                            NULL);
                  vect_phi_init = vect_init_vector (use_stmt, init_def,
                                                    vectype, NULL);

                  /* Update phi node arguments with vs0 and vs2.  */
                  add_phi_arg (vect_phi, vect_phi_init,
                               loop_preheader_edge (outer_loop),
                               UNKNOWN_LOCATION);
                  add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
                               loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "created double reduction phi node: ");
                      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
                      dump_printf (MSG_NOTE, "\n");
                    }

                  vect_phi_res = PHI_RESULT (vect_phi);

                  /* Replace the use, i.e., set the correct vs1 in the regular
                     reduction phi node.  FORNOW, NCOPIES is always 1, so the
                     loop is redundant.  */
                  use = reduction_phi;
                  for (j = 0; j < ncopies; j++)
                    {
                      edge pr_edge = loop_preheader_edge (loop);
                      SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
                      use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
                    }
                }
            }
        }

      phis.release ();
      if (nested_in_vect_loop)
        {
          if (double_reduc)
            loop = outer_loop;
          else
            continue;
        }

      phis.create (3);
      /* Find the loop-closed-use at the loop exit of the original scalar
         result.  (The reduction result is expected to have two immediate uses,
         one at the latch block, and one at the loop exit).  For double
         reductions we are looking for exit phis of the outer loop.  */
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
            {
              if (!is_gimple_debug (USE_STMT (use_p)))
                phis.safe_push (USE_STMT (use_p));
            }
          else
            {
              if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
                {
                  tree phi_res = PHI_RESULT (USE_STMT (use_p));

                  FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
                    {
                      if (!flow_bb_inside_loop_p (loop,
                                             gimple_bb (USE_STMT (phi_use_p)))
                          && !is_gimple_debug (USE_STMT (phi_use_p)))
                        phis.safe_push (USE_STMT (phi_use_p));
                    }
                }
            }
        }

      FOR_EACH_VEC_ELT (phis, i, exit_phi)
        {
          /* Replace the uses:  */
          orig_name = PHI_RESULT (exit_phi);
          scalar_result = scalar_results[k];
          FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, scalar_result);
        }

      phis.release ();
    }
}
/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may be
   of this form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   In some cases of reduction patterns, the type of the reduction variable X is
   different than the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a vector
   stmt is different than the vectype that is used to determine the
   vectorization factor, because it consists of a different number of elements
   than the actual number of elements that are being operated upon in parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example), so
   that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should *NOT*
   be used to create the vectorized stmt.  The right vectype for the vectorized
   stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
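/* For illustration (a sketch, not from the original sources): the scenario
   described above arises from source like

       short a[N];
       int sum = 0;
       for (i = 0; i < N; i++)
         sum += a[i];          // S1: t = (int) a[i];  S2: sum += t;

   which pattern recognition may rewrite into a widening-sum pattern stmt
   sum = WIDEN_SUM_EXPR <a[i], sum>.  TREE_TYPE (X) is int, so the vector
   stmt is created with vectype V4SI, while STMT_VINFO_VECTYPE remains V8HI
   so that the vectorization factor comes out as 8.  */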
bool
vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree loop_vec_def0 = NULL_TREE, loop_vec_def1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  tree vectype_in = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  enum tree_code code, orig_code, epilog_reduc_code;
  enum machine_mode vec_mode;
  int op_type;
  optab optab, reduc_optab;
  tree new_temp = NULL_TREE;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  gimple new_phi = NULL;
  tree scalar_type;
  bool is_simple_use;
  gimple orig_stmt;
  stmt_vec_info orig_stmt_info;
  tree expr = NULL_TREE;
  int i;
  int ncopies;
  int epilog_copies;
  stmt_vec_info prev_stmt_info, prev_phi_info;
  bool single_defuse_cycle = false;
  tree reduc_def = NULL_TREE;
  gimple new_stmt = NULL;
  int j;
  tree ops[3];
  bool nested_cycle = false, found_nested_cycle_def = false;
  gimple reduc_def_stmt = NULL;
  /* The default is that the reduction variable is the last in statement.  */
  int reduc_index = 2;
  bool double_reduc = false, dummy;
  basic_block def_bb;
  struct loop * def_stmt_loop, *outer_loop = NULL;
  tree def_arg;
  gimple def_arg_stmt;
  auto_vec<tree> vec_oprnds0;
  auto_vec<tree> vec_oprnds1;
  auto_vec<tree> vect_defs;
  auto_vec<gimple> phis;
  int vec_num;
  tree def0, def1, tem, op0, op1 = NULL_TREE;

  /* In case of reduction chain we switch to the first stmt in the chain, but
     we don't update STMT_INFO, since only the last stmt is marked as reduction
     and has reduction properties.  */
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_cycle = true;
    }

  /* 1. Is vectorizable reduction?  */
  /* Not supportable if the reduction variable is used in the loop, unless
     it's a reduction chain.  */
  if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
      && !GROUP_FIRST_ELEMENT (stmt_info))
    return false;

  /* Reductions that are not used even in an enclosing outer-loop,
     are expected to be "live" (used out of the loop).  */
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
      && !STMT_VINFO_LIVE_P (stmt_info))
    return false;

  /* Make sure it was already recognized as a reduction computation.  */
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
      && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
    return false;

  /* 2. Has this been recognized as a reduction pattern?

     Check if STMT represents a pattern that has been recognized
     in earlier analysis stages.  For stmts that represent a pattern,
     the STMT_VINFO_RELATED_STMT field records the last stmt in
     the original sequence that constitutes the pattern.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (orig_stmt)
    {
      orig_stmt_info = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
      gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
    }

  /* 3. Check the operands of the operation.  The first operands are defined
        inside the loop body.  The last operand is the reduction variable,
        which is defined by the loop-header-phi.  */

  gcc_assert (is_gimple_assign (stmt));

  /* Flatten RHS.  */
  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      op_type = TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt));
      if (op_type == ternary_op)
        {
          tree rhs = gimple_assign_rhs1 (stmt);
          ops[0] = TREE_OPERAND (rhs, 0);
          ops[1] = TREE_OPERAND (rhs, 1);
          ops[2] = TREE_OPERAND (rhs, 2);
          code = TREE_CODE (rhs);
        }
      else
        return false;
      break;

    case GIMPLE_BINARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == binary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      break;

    case GIMPLE_TERNARY_RHS:
      code = gimple_assign_rhs_code (stmt);
      op_type = TREE_CODE_LENGTH (code);
      gcc_assert (op_type == ternary_op);
      ops[0] = gimple_assign_rhs1 (stmt);
      ops[1] = gimple_assign_rhs2 (stmt);
      ops[2] = gimple_assign_rhs3 (stmt);
      break;

    case GIMPLE_UNARY_RHS:
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == COND_EXPR && slp_node)
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
      && !SCALAR_FLOAT_TYPE_P (scalar_type))
    return false;
  /* Do not try to vectorize bit-precision reductions.  */
  if ((TYPE_PRECISION (scalar_type)
       != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
    return false;

  /* All uses but the last are expected to be defined in the loop.
     The last use is the reduction variable.  In case of nested cycle this
     assumption is not true: we use reduc_index to record the index of the
     reduction variable.  */
  for (i = 0; i < op_type - 1; i++)
    {
      /* The condition of COND_EXPR is checked in vectorizable_condition().  */
      if (i == 0 && code == COND_EXPR)
        continue;

      is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
                                            &def_stmt, &def, &dt, &tem);
      if (!vectype_in)
        vectype_in = tem;
      gcc_assert (is_simple_use);

      if (dt != vect_internal_def
          && dt != vect_external_def
          && dt != vect_constant_def
          && dt != vect_induction_def
          && !(dt == vect_nested_cycle && nested_cycle))
        return false;

      if (dt == vect_nested_cycle)
        {
          found_nested_cycle_def = true;
          reduc_def_stmt = def_stmt;
          reduc_index = i;
        }
    }

  is_simple_use = vect_is_simple_use_1 (ops[i], stmt, loop_vinfo, NULL,
                                        &def_stmt, &def, &dt, &tem);
  if (!vectype_in)
    vectype_in = tem;
  gcc_assert (is_simple_use);
  if (!found_nested_cycle_def)
    reduc_def_stmt = def_stmt;

  if (reduc_def_stmt && gimple_code (reduc_def_stmt) != GIMPLE_PHI)
    return false;

  if (!(dt == vect_reduction_def
        || dt == vect_nested_cycle
        || ((dt == vect_internal_def || dt == vect_external_def
             || dt == vect_constant_def || dt == vect_induction_def)
            && nested_cycle && found_nested_cycle_def)))
    {
      /* For pattern recognized stmts, orig_stmt might be a reduction,
         but some helper statements for the pattern might not, or
         might be COND_EXPRs with reduction uses in the condition.  */
      gcc_assert (orig_stmt);
      return false;
    }

  if (orig_stmt)
    gcc_assert (orig_stmt == vect_is_simple_reduction (loop_vinfo,
                                                       reduc_def_stmt,
                                                       !nested_cycle,
                                                       &dummy));
  else
    {
      gimple tmp = vect_is_simple_reduction (loop_vinfo, reduc_def_stmt,
                                             !nested_cycle, &dummy);
      /* We changed STMT to be the first stmt in reduction chain, hence we
         check that in this case the first element in the chain is STMT.  */
      gcc_assert (stmt == tmp
                  || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
    }

  if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
    return false;

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
               / TYPE_VECTOR_SUBPARTS (vectype_in));

  gcc_assert (ncopies >= 1);
  vec_mode = TYPE_MODE (vectype_in);

  if (code == COND_EXPR)
    {
      if (!vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported condition in reduction\n");

          return false;
        }
    }
  else
    {
      /* 4. Supportable by target?  */

      if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
          || code == LROTATE_EXPR || code == RROTATE_EXPR)
        {
          /* Shifts and rotates are only supported by vectorizable_shifts,
             not vectorizable_reduction.  */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unsupported shift or rotation.\n");
          return false;
        }

      /* 4.1. check support for the operation in the loop  */
      optab = optab_for_tree_code (code, vectype_in, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");

          return false;
        }

      if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "op not supported by target.\n");

          if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
              || LOOP_VINFO_VECT_FACTOR (loop_vinfo)
                 < vect_min_worthwhile_factor (code))
            return false;

          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "proceeding using word mode.\n");
        }

      /* Worthwhile without SIMD support?  */
      if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
          && LOOP_VINFO_VECT_FACTOR (loop_vinfo)
             < vect_min_worthwhile_factor (code))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "not worthwhile without SIMD support.\n");

          return false;
        }
    }

  /* 4.2. Check support for the epilog operation.

          If STMT represents a reduction pattern, then the type of the
          reduction variable may be different than the type of the rest
          of the arguments.  For example, consider the case of accumulation
          of shorts into an int accumulator; The original code:
                        S1: int_a = (int) short_a;
          orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;

          was replaced with:
                        STMT: int_acc = widen_sum <short_a, int_acc>

          This means that:
          1. The tree-code that is used to create the vector operation in the
             epilog code (that reduces the partial results) is not the
             tree-code of STMT, but is rather the tree-code of the original
             stmt from the pattern that STMT is replacing.  I.e, in the example
             above we want to use 'widen_sum' in the loop, but 'plus' in the
             epilog.
          2. The type (mode) we use to check available target support
             for the vector operation to be created in the *epilog*, is
             determined by the type of the reduction variable (in the example
             above we'd check this: optab_handler (plus_optab, vect_int_mode])).
             However the type (mode) we use to check available target support
             for the vector operation to be created *inside the loop*, is
             determined by the type of the other arguments to STMT (in the
             example we'd check this: optab_handler (widen_sum_optab,
             vect_short_mode)).

          This is contrary to "regular" reductions, in which the types of all
          the arguments are the same as the type of the reduction variable.
          For "regular" reductions we can therefore use the same vector type
          (and also the same tree-code) when generating the epilog code and
          when generating the code inside the loop.  */
  if (orig_stmt)
    {
      /* This is a reduction pattern: get the vectype from the type of the
         reduction variable, and get the tree-code from orig_stmt.  */
      orig_code = gimple_assign_rhs_code (orig_stmt);
      gcc_assert (vectype_out);
      vec_mode = TYPE_MODE (vectype_out);
    }
  else
    {
      /* Regular reduction: use the same vectype and tree-code as used for
         the vector code inside the loop can be used for the epilog code.  */
      orig_code = code;
    }

  if (nested_cycle)
    {
      def_bb = gimple_bb (reduc_def_stmt);
      def_stmt_loop = def_bb->loop_father;
      def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
                                       loop_preheader_edge (def_stmt_loop));
      if (TREE_CODE (def_arg) == SSA_NAME
          && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
          && gimple_code (def_arg_stmt) == GIMPLE_PHI
          && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
          && vinfo_for_stmt (def_arg_stmt)
          && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
              == vect_double_reduction_def)
        double_reduc = true;
    }

  epilog_reduc_code = ERROR_MARK;
  if (reduction_code_for_scalar_code (orig_code, &epilog_reduc_code))
    {
      reduc_optab = optab_for_tree_code (epilog_reduc_code, vectype_out,
                                         optab_default);
      if (!reduc_optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab for reduction.\n");

          epilog_reduc_code = ERROR_MARK;
        }

      if (reduc_optab
          && optab_handler (reduc_optab, vec_mode) == CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "reduc op not supported by target.\n");

          epilog_reduc_code = ERROR_MARK;
        }
    }
  else
    {
      if (!nested_cycle || double_reduc)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no reduc code for scalar code.\n");

          return false;
        }
    }

  if (double_reduc && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in double reduction\n");

      return false;
    }
  /* In case of widening multiplication by a constant, we update the type
     of the constant to be the type of the other operand.  We check that the
     constant fits the type in the pattern recognition pass.  */
  if (code == DOT_PROD_EXPR
      && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
    {
      if (TREE_CODE (ops[0]) == INTEGER_CST)
        ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
      else if (TREE_CODE (ops[1]) == INTEGER_CST)
        ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "invalid types in dot-prod\n");

          return false;
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      if (!vect_model_reduction_cost (stmt_info, epilog_reduc_code, ncopies))
        return false;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");

  /* FORNOW: Multiple types are not supported for condition.  */
  if (code == COND_EXPR)
    gcc_assert (ncopies == 1);

  /* Create the destination vector  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0;
        r1 = x1 + r1;
    (i.e. we generate VF results in 2 registers).
    In this case we have a separate def-use cycle for each copy, and therefore
    for each copy we get the vector def for the reduction variable from the
    respective phi node created for this copy.

    Otherwise (the reduction is unused in the loop nest), we can combine
    together intermediate results, like so (e.g. for ncopies=2):
        r = phi (init, r)
        r = x0 + r;
        r = x1 + r;
   (i.e. we generate VF/2 results in a single register).
   In this case for each copy we get the vector def for the reduction variable
   from the vectorized reduction operation generated in the previous iteration.
  */
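  /* For illustration (not from the original sources): with VF == 8 and a
     four-element vectype (nunits == 4), ncopies == 2.  In the used-in-outer-
     loop case we emit two independent phi/stmt cycles (r0 and r1 above),
     each holding 4 partial results; in the unused case a single cycle
     threads both copies and only the final vector reaches the epilog.  */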
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
    {
      single_defuse_cycle = true;
      epilog_copies = 1;
    }
  else
    epilog_copies = ncopies;

  prev_stmt_info = NULL;
  prev_phi_info = NULL;
  if (slp_node)
    {
      vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype_out)
                  == TYPE_VECTOR_SUBPARTS (vectype_in));
    }
  else
    {
      vec_num = 1;
      vec_oprnds0.create (1);
      if (op_type == ternary_op)
        vec_oprnds1.create (1);
    }

  phis.create (vec_num);
  vect_defs.create (vec_num);
  if (!slp_node)
    vect_defs.quick_push (NULL_TREE);

  for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
        {
          for (i = 0; i < vec_num; i++)
            {
              /* Create the reduction-phi that defines the reduction
                 operand.  */
              new_phi = create_phi_node (vec_dest, loop->header);
              set_vinfo_for_stmt (new_phi,
                                  new_stmt_vec_info (new_phi, loop_vinfo,
                                                     NULL));
               if (j == 0 || slp_node)
                 phis.quick_push (new_phi);
            }
        }

      if (code == COND_EXPR)
        {
          gcc_assert (!slp_node);
          vectorizable_condition (stmt, gsi, vec_stmt,
                                  PHI_RESULT (phis[0]),
                                  reduc_index, NULL);
          /* Multiple types are not supported for condition.  */
          break;
        }

      /* Handle uses.  */
      if (j == 0)
        {
          op0 = ops[!reduc_index];
          if (op_type == ternary_op)
            {
              if (reduc_index == 0)
                op1 = ops[2];
              else
                op1 = ops[1];
            }

          if (slp_node)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
          else
            {
              loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
                                                            stmt, NULL);
              vec_oprnds0.quick_push (loop_vec_def0);
              if (op_type == ternary_op)
                {
                  loop_vec_def1 = vect_get_vec_def_for_operand (op1, stmt,
                                                                NULL);
                  vec_oprnds1.quick_push (loop_vec_def1);
                }
            }
        }
      else
        {
          if (!slp_node)
            {
              enum vect_def_type dt;
              gimple dummy_stmt;
              tree dummy;

              vect_is_simple_use (ops[!reduc_index], stmt, loop_vinfo, NULL,
                                  &dummy_stmt, &dummy, &dt);
              loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt,
                                                              loop_vec_def0);
              vec_oprnds0[0] = loop_vec_def0;
              if (op_type == ternary_op)
                {
                  vect_is_simple_use (op1, stmt, loop_vinfo, NULL, &dummy_stmt,
                                      &dummy, &dt);
                  loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt,
                                                                loop_vec_def1);
                  vec_oprnds1[0] = loop_vec_def1;
                }
            }

          if (single_defuse_cycle)
            reduc_def = gimple_assign_lhs (new_stmt);

          STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
        }

      FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
        {
          if (slp_node)
            reduc_def = PHI_RESULT (phis[i]);
          else
            {
              if (!single_defuse_cycle || j == 0)
                reduc_def = PHI_RESULT (new_phi);
            }

          def1 = ((op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL);
          if (op_type == binary_op)
            {
              if (reduc_index == 0)
                expr = build2 (code, vectype_out, reduc_def, def0);
              else
                expr = build2 (code, vectype_out, def0, reduc_def);
            }
          else
            {
              if (reduc_index == 0)
                expr = build3 (code, vectype_out, reduc_def, def0, def1);
              else
                {
                  if (reduc_index == 1)
                    expr = build3 (code, vectype_out, def0, reduc_def, def1);
                  else
                    expr = build3 (code, vectype_out, def0, def1, reduc_def);
                }
            }

          new_stmt = gimple_build_assign (vec_dest, expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (slp_node)
            {
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              vect_defs.quick_push (new_temp);
            }
          else
            vect_defs[0] = new_temp;
        }

      if (slp_node)
        break;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
      prev_phi_info = vinfo_for_stmt (new_phi);
    }

  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
  if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
    {
      new_temp = gimple_assign_lhs (*vec_stmt);
      vect_defs[0] = new_temp;
    }

  vect_create_epilog_for_reduction (vect_defs, stmt, epilog_copies,
                                    epilog_reduc_code, phis, reduc_index,
                                    double_reduc, slp_node);

  return true;
}
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */

int
vect_min_worthwhile_factor (enum tree_code code)
{
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
      return 2;

    default:
      return INT_MAX;
    }
}
/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
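/* For illustration (a sketch, not from the original sources): for

       for (i = 0; i < n; i++)
         { x[i] = j; j = j + 3; }

   with a four-element vectype, the vectorized phi produced below (via
   get_initial_def_for_induction) starts at {j0, j0+3, j0+6, j0+9} and is
   stepped by {12, 12, 12, 12} on every vector iteration.  */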
bool
vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                        gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  tree vec_def;

  gcc_assert (ncopies >= 1);
  /* FORNOW. These restrictions should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi))
    {
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      gimple exit_phi;
      edge latch_e;
      tree loop_arg;

      if (ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types in nested loop.\n");
          return false;
        }

      exit_phi = NULL;
      latch_e = loop_latch_edge (loop->inner);
      loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          gimple use_stmt = USE_STMT (use_p);
          if (is_gimple_debug (use_stmt))
            continue;

          if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
            {
              exit_phi = use_stmt;
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
          if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
                && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "inner-loop induction only used outside "
                                 "of the outer vectorized loop.\n");
              return false;
            }
        }
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_induction ===\n");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");

  vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */

bool
vectorizable_live_operation (gimple stmt,
                             gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                             gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int i;
  int op_type;
  tree op;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  enum tree_code code;
  enum gimple_rhs_class rhs_class;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  if (!is_gimple_assign (stmt))
    {
      if (gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
          && gimple_call_lhs (stmt)
          && loop->simduid
          && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
          && loop->simduid
             == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
        {
          edge e = single_exit (loop);
          basic_block merge_bb = e->dest;
          imm_use_iterator imm_iter;
          use_operand_p use_p;
          tree lhs = gimple_call_lhs (stmt);

          FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
            {
              gimple use_stmt = USE_STMT (use_p);
              if (gimple_code (use_stmt) == GIMPLE_PHI
                  && gimple_bb (use_stmt) == merge_bb)
                {
                  if (vec_stmt)
                    {
                      tree vfm1
                        = build_int_cst (unsigned_type_node,
                                         loop_vinfo->vectorization_factor - 1);
                      SET_PHI_ARG_DEF (use_stmt, e->dest_idx, vfm1);
                    }
                  return true;
                }
            }
        }

      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  /* FORNOW. CHECKME. */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  op_type = TREE_CODE_LENGTH (code);
  rhs_class = get_gimple_rhs_class (code);
  gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
  gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);

  /* FORNOW: support only if all uses are invariant.  This means
     that the scalar operations can remain in place, unvectorized.
     The original last scalar value that they compute will be used.  */

  for (i = 0; i < op_type; i++)
    {
      if (rhs_class == GIMPLE_SINGLE_RHS)
        op = TREE_OPERAND (gimple_op (stmt, 1), i);
      else
        op = gimple_op (stmt, i + 1);
      if (op
          && !vect_is_simple_use (op, stmt, loop_vinfo, NULL, &def_stmt, &def,
                                  &dt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }

  /* No transformation is required for the cases we currently support.  */
  return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */

static void
vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb;

          if (!is_gimple_debug (ustmt))
            continue;

          bb = gimple_bb (ustmt);

          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (gimple_debug_bind_p (ustmt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "killing debug use\n");

                  gimple_debug_bind_reset_value (ustmt);
                  update_stmt (ustmt);
                }
              else
                gcc_unreachable ();
            }
        }
    }
}
/* This function builds ni_name = number of iterations.  Statements
   are emitted on the loop preheader edge.  */

static tree
vect_build_loop_niters (loop_vec_info loop_vinfo)
{
  tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
  if (TREE_CODE (ni) == INTEGER_CST)
    return ni;
  else
    {
      tree ni_name, var;
      gimple_seq stmts = NULL;
      edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));

      var = create_tmp_var (TREE_TYPE (ni), "niters");
      ni_name = force_gimple_operand (ni, &stmts, false, var);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (pe, stmts);

      return ni_name;
    }
}
/* This function generates the following statements:

   ni_name = number of iterations loop executes
   ratio = ni_name / vf
   ratio_mult_vf_name = ratio * vf

   and places them on the loop preheader edge.  */

static void
vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
                                 tree ni_name,
                                 tree *ratio_mult_vf_name_ptr,
                                 tree *ratio_name_ptr)
{
  tree ni_minus_gap_name;
  tree var;
  tree ratio_name;
  tree ratio_mult_vf_name;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  edge pe = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
  tree log_vf;

  log_vf = build_int_cst (TREE_TYPE (ni_name), exact_log2 (vf));

  /* If epilogue loop is required because of data accesses with gaps, we
     subtract one iteration from the total number of iterations here for
     correct calculation of RATIO.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
                                       ni_name,
                                       build_one_cst (TREE_TYPE (ni_name)));
      if (!is_gimple_val (ni_minus_gap_name))
        {
          var = create_tmp_var (TREE_TYPE (ni_name), "ni_gap");
          gimple stmts = NULL;
          ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
                                                    true, var);
          gsi_insert_seq_on_edge_immediate (pe, stmts);
        }
    }
  else
    ni_minus_gap_name = ni_name;

  /* Create: ratio = ni >> log2(vf) */
  /* ??? As we have ni == number of latch executions + 1, ni could
     have overflown to zero.  So avoid computing ratio based on ni
     but compute it using the fact that we know ratio will be at least
     one, thus via (ni - vf) >> log2(vf) + 1.  */
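  /* A worked example (illustration, not from the original sources): with
     vf == 4 and ni == 10, ratio = ((10 - 4) >> 2) + 1 == 2 and
     ratio_mult_vf = 2 << 2 == 8, so the vector loop covers 8 iterations and
     the epilogue handles the remaining 2.  Had ni wrapped to 0 (2^32 latch
     executions + 1 in a 32-bit type), (0 - 4) >> 2, computed unsigned,
     plus 1 still yields the correct 2^30.  */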
  ratio_name
    = fold_build2 (PLUS_EXPR, TREE_TYPE (ni_name),
                   fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_name),
                                fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
                                             ni_minus_gap_name,
                                             build_int_cst
                                               (TREE_TYPE (ni_name), vf)),
                                log_vf),
                   build_int_cst (TREE_TYPE (ni_name), 1));
  if (!is_gimple_val (ratio_name))
    {
      var = create_tmp_var (TREE_TYPE (ni_name), "bnd");
      gimple stmts = NULL;
      ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
      gsi_insert_seq_on_edge_immediate (pe, stmts);
    }
  *ratio_name_ptr = ratio_name;

  /* Create: ratio_mult_vf = ratio << log2 (vf).  */

  if (ratio_mult_vf_name_ptr)
    {
      ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
                                        ratio_name, log_vf);
      if (!is_gimple_val (ratio_mult_vf_name))
        {
          var = create_tmp_var (TREE_TYPE (ni_name), "ratio_mult_vf");
          gimple stmts = NULL;
          ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name,
                                                     &stmts, true, var);
          gsi_insert_seq_on_edge_immediate (pe, stmts);
        }
      *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
    }

  return;
}
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.  */

void
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  int i;
  tree ratio = NULL;
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool grouped_store;
  bool slp_scheduled = false;
  gimple stmt, pattern_stmt;
  gimple_seq pattern_def_seq = NULL;
  gimple_stmt_iterator pattern_def_si = gsi_none ();
  bool transform_pattern_stmt = false;
  bool check_profitability = false;
  int th;
  /* Record number of iterations before we started tampering with the
     profile.  */
  gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");

  /* If profile is imprecise, we have chance to fix it up.  */
  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    expected_iterations = LOOP_VINFO_INT_NITERS (loop_vinfo);

  /* Use the more conservative vectorization threshold.  If the number
     of iterations is constant assume the cost check has been performed
     by our caller.  If the threshold makes all loops profitable that
     run at least the vectorization factor number of times checking
     is pointless, too.  */
  th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
  if (th >= LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1
      && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Profitability threshold is %d loop iterations.\n",
                         th);
      check_profitability = true;
    }

  /* Version the loop first, if required, so the profitability check
     comes first.  */

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      vect_loop_versioning (loop_vinfo, th, check_profitability);
      check_profitability = false;
    }

  tree ni_name = vect_build_loop_niters (loop_vinfo);
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = ni_name;

  /* Peel the loop if there are data refs with unknown alignment.
     Only one data ref with unknown store is allowed.  */

  if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      vect_do_peeling_for_alignment (loop_vinfo, ni_name,
                                     th, check_profitability);
      check_profitability = false;
      /* The above adjusts LOOP_VINFO_NITERS, so cause ni_name to
         be re-computed.  */
      ni_name = NULL_TREE;
    }

  /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
     compile time constant), or it is a constant that doesn't divide by the
     vectorization factor, then an epilog loop needs to be created.
     We therefore duplicate the loop: the original loop will be vectorized,
     and will compute the first (n/VF) iterations.  The second copy of the loop
     will remain scalar and will compute the remaining (n%VF) iterations.
     (VF is the vectorization factor).  */
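  /* For example (an illustration, not from the original sources): with
     n == 13 and VF == 4, the vectorized copy executes 13/4 == 3 times,
     covering 12 iterations, and the scalar epilog copy executes the
     remaining 13%4 == 1 iteration.  */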
  if (LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
      || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      tree ratio_mult_vf;
      if (!ni_name)
        ni_name = vect_build_loop_niters (loop_vinfo);
      vect_generate_tmps_on_preheader (loop_vinfo, ni_name, &ratio_mult_vf,
                                       &ratio);
      vect_do_peeling_for_loop_bound (loop_vinfo, ni_name, ratio_mult_vf,
                                      th, check_profitability);
    }
  else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                LOOP_VINFO_INT_NITERS (loop_vinfo) / vectorization_factor);
  else
    {
      if (!ni_name)
        ni_name = vect_build_loop_niters (loop_vinfo);
      vect_generate_tmps_on_preheader (loop_vinfo, ni_name, NULL, &ratio);
    }
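
  /* E.g. (illustrative): with a compile-time iteration count of 100 and
     VF = 4, RATIO is simply the constant 25; otherwise RATIO = niters/VF
     is materialized on the preheader and used below to set the number
     of iterations of the vectorized loop.  */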
  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops whose body consists of
     one basic block (header + empty latch).  When the vectorizer
     supports more involved loop forms, the order in which the BBs are
     traversed will need to be reconsidered.  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;
      gimple phi;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing phi: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
              dump_printf (MSG_NOTE, "\n");
            }
          stmt_info = vinfo_for_stmt (phi);
          if (!stmt_info)
            continue;

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, phi);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            continue;

          if (STMT_VINFO_VECTYPE (stmt_info)
              && (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
                  != (unsigned HOST_WIDE_INT) vectorization_factor)
              && dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");

          if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }

      pattern_stmt = NULL;
      for (si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;)
        {
          bool is_store;

          if (transform_pattern_stmt)
            stmt = pattern_stmt;
          else
            {
              stmt = gsi_stmt (si);
              /* During vectorization remove existing clobber stmts.  */
              if (gimple_clobber_p (stmt))
                {
                  unlink_stmt_vdef (stmt);
                  gsi_remove (&si, true);
                  release_defs (stmt);
                  continue;
                }
            }

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "------>vectorizing statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
              dump_printf (MSG_NOTE, "\n");
            }

          stmt_info = vinfo_for_stmt (stmt);

          /* vector stmts created in the outer-loop during vectorization of
             stmts in an inner-loop may not have a stmt_info, and do not
             need to be vectorized.  */
          if (!stmt_info)
            {
              gsi_next (&si);
              continue;
            }

          if (MAY_HAVE_DEBUG_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
            vect_loop_kill_debug_uses (loop, stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                  && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
                {
                  stmt = pattern_stmt;
                  stmt_info = vinfo_for_stmt (stmt);
                }
              else
                {
                  gsi_next (&si);
                  continue;
                }
            }
          else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
                   && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
                   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
                       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
            transform_pattern_stmt = true;

          /* If the pattern statement has def stmts, vectorize them too.  */
          if (is_pattern_stmt_p (stmt_info))
            {
              if (pattern_def_seq == NULL)
                {
                  pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
                  pattern_def_si = gsi_start (pattern_def_seq);
                }
              else if (!gsi_end_p (pattern_def_si))
                gsi_next (&pattern_def_si);
              if (pattern_def_seq != NULL)
                {
                  gimple pattern_def_stmt = NULL;
                  stmt_vec_info pattern_def_stmt_info = NULL;

                  while (!gsi_end_p (pattern_def_si))
                    {
                      pattern_def_stmt = gsi_stmt (pattern_def_si);
                      pattern_def_stmt_info
                        = vinfo_for_stmt (pattern_def_stmt);
                      if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
                          || STMT_VINFO_LIVE_P (pattern_def_stmt_info))
                        break;
                      gsi_next (&pattern_def_si);
                    }

                  if (!gsi_end_p (pattern_def_si))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "==> vectorizing pattern def "
                                           "stmt: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                                            pattern_def_stmt, 0);
                          dump_printf (MSG_NOTE, "\n");
                        }

                      stmt = pattern_def_stmt;
                      stmt_info = pattern_def_stmt_info;
                    }
                  else
                    {
                      pattern_def_si = gsi_none ();
                      transform_pattern_stmt = false;
                    }
                }
              else
                transform_pattern_stmt = false;
            }
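
          /* Illustrative example (pattern hypothetical): a recognized
             widening multiply such as

               short s1, s2;  int r;
               r = (int) s1 * (int) s2;

             may carry a pattern def sequence holding auxiliary stmts
             (e.g. intermediate conversions) that must be vectorized
             before the main pattern stmt; the iteration above selects
             the next relevant def stmt from that sequence.  */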
          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              unsigned int nunits
                = (unsigned int)
                  TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
              if (!STMT_SLP_TYPE (stmt_info)
                  && nunits != (unsigned int) vectorization_factor
                  && dump_enabled_p ())
                /* For SLP, VF is set according to the unrolling factor, and
                   not to the vector size; hence for SLP this print is not
                   valid.  */
                dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
            }

          /* SLP.  Schedule all the SLP instances when the first SLP stmt
             is reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;

                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "=== scheduling SLP instances ===\n");

                  vect_schedule_slp (loop_vinfo, NULL);
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
                {
                  if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
                    {
                      pattern_def_seq = NULL;
                      gsi_next (&si);
                    }
                  continue;
                }
            }
          /* -------- vectorize statement ------------ */
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");

          grouped_store = false;
          is_store = vect_transform_stmt (stmt, &si, &grouped_store,
                                          NULL, NULL);
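
          /* Illustrative example: for an interleaved store group such as

               a[2*i] = x;  a[2*i+1] = y;

             vect_transform_stmt only signals IS_STORE when the last
             scalar store of the chain is reached, at which point the
             whole group has been emitted as (permuted) vector stores
             and all scalar stores in the chain can be removed.  */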
          if (is_store)
            {
              if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization
                     of the interleaving chain was completed; free all the
                     stores in the chain.  */
                  gsi_next (&si);
                  vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
                }
              else
                {
                  /* Free the attached stmt_vec_info and remove the stmt.  */
                  gimple store = gsi_stmt (si);
                  free_stmt_vec_info (store);
                  unlink_stmt_vdef (store);
                  gsi_remove (&si, true);
                  release_defs (store);
                }

              /* Stores can only appear at the end of pattern statements.  */
              gcc_assert (!transform_pattern_stmt);
              pattern_def_seq = NULL;
            }
          else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
            {
              pattern_def_seq = NULL;
              gsi_next (&si);
            }
        }                       /* stmts in BB */
    }                           /* BBs in loop */

  slpeel_make_loop_iterate_ntimes (loop, ratio);
  /* Reduce loop iterations by the vectorization factor.  */
  scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
                      expected_iterations / vectorization_factor);
  loop->nb_iterations_upper_bound
    = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi
                                              (vectorization_factor),
                                            FLOOR_DIV_EXPR);
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
      && loop->nb_iterations_upper_bound != double_int_zero)
    loop->nb_iterations_upper_bound
      = loop->nb_iterations_upper_bound - double_int_one;
  if (loop->any_estimate)
    {
      loop->nb_iterations_estimate
        = loop->nb_iterations_estimate.udiv (double_int::from_uhwi
                                               (vectorization_factor),
                                             FLOOR_DIV_EXPR);
      if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
          && loop->nb_iterations_estimate != double_int_zero)
        loop->nb_iterations_estimate
          = loop->nb_iterations_estimate - double_int_one;
    }
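
  /* E.g. (illustrative): a loop profiled to iterate 100 times with
     VF = 4 is now expected to execute its header about 25 times; with
     peeling for gaps the last vector iteration is instead done by the
     scalar epilog, hence the extra decrement of the bounds above.  */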
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "LOOP VECTORIZED\n");
      if (loop->inner)
        dump_printf_loc (MSG_NOTE, vect_location,
                         "OUTER LOOP VECTORIZED\n");
      dump_printf (MSG_NOTE, "\n");
    }
}