/* Loop Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software
   Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com> and
   Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "params.h"
#include "toplev.h"
#include "tree-chrec.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
/* Loop Vectorization Pass.

   This pass tries to vectorize loops.

   For example, the vectorizer transforms the following simple loop:

        short a[N]; short b[N]; short c[N]; int i;

        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   as if it were manually vectorized by rewriting the source code into:

        typedef int __attribute__((mode(V8HI))) v8hi;
        short a[N]; short b[N]; short c[N]; int i;
        v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;

        for (i=0; i<N/8; i++){
          *pa++ = *pb++ + *pc++;
        }

   The main entry to this pass is vectorize_loops(), in which
   the vectorizer applies a set of analyses on a given set of loops,
   followed by the actual vectorization transformation for the loops that
   had successfully passed the analysis phase.
   Throughout this pass we make a distinction between two types of
   data: scalars (which are represented by SSA_NAMEs), and memory references
   ("data-refs").  These two types of data require different handling both
   during analysis and transformation.  The types of data-refs that the
   vectorizer currently supports are ARRAY_REFs whose base is an array DECL
   (not a pointer), and INDIRECT_REFs through pointers; both array and pointer
   accesses are required to have a simple (consecutive) access pattern.

   Analysis phase:
   ===============
   The driver for the analysis phase is vect_analyze_loop().
   It applies a set of analyses, some of which rely on the scalar evolution
   analyzer (scev) developed by Sebastian Pop.

   During the analysis phase the vectorizer records some information
   per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
   loop, as well as general information about the loop as a whole, which is
   recorded in a "loop_vec_info" struct attached to each loop.

   Transformation phase:
   =====================
   The loop transformation phase scans all the stmts in the loop, and
   creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
   the loop that needs to be vectorized.  It inserts the vector code sequence
   just before the scalar stmt S, and records a pointer to the vector code
   in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
   attached to S).  This pointer will be used for the vectorization of
   following stmts which use the def of stmt S.  Stmt S is removed if it
   writes to memory; otherwise, we rely on dead code elimination for
   removing it.

   For example, say stmt S1 was vectorized into stmt VS1:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   S2:  a = b;

   To vectorize stmt S2, the vectorizer first finds the stmt that defines
   the operand 'b' (S1), and gets the relevant vector def 'vb' from the
   vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
   resulting sequence would be:

   VS1: vb = px[i];
   S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
   VS2: va = vb;
   S2:  a = b;       STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2

   Operands that are not SSA_NAMEs, are data-refs that appear in
   load/store operations (like 'x[i]' in S1), and are handled differently.

   Target modeling:
   =================
   Currently the only target specific information that is used is the
   size of the vector (in bytes) - "UNITS_PER_SIMD_WORD".  Targets that can
   support vectors of different sizes will, for now, need to specify one
   value for "UNITS_PER_SIMD_WORD".  More flexibility will be added in the
   future.

   Since we only vectorize operations whose vector form can be
   expressed using existing tree codes, to verify that an operation is
   supported, the vectorizer checks the relevant optab at the relevant
   machine_mode (e.g, optab_handler (add_optab, V8HImode)->insn_code).  If
   the value found is CODE_FOR_nothing, then there's no target support, and
   we can't vectorize the stmt.

   For additional information on this project see:
   http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
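/* Illustration only (not part of the pass itself): a hedged sketch of the
   optab query described above.  VECTYPE stands for any vector type whose
   mode the target may or may not support; the calls follow the optabs
   interface as used elsewhere in GCC, but treat this as an example rather
   than a definitive recipe:

     optab op = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
     if (!op
         || optab_handler (op, TYPE_MODE (vectype))->insn_code
            == CODE_FOR_nothing)
       return false;   (no target support - the stmt can't be vectorized)
*/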
/* Function vect_determine_vectorization_factor

   Determine the vectorization factor (VF).  VF is the number of data
   elements that are operated upon in parallel in a single iteration of the
   vectorized loop.  For example, when vectorizing a loop that operates on
   4-byte elements, on a target with a vector size (VS) of 16 bytes, the VF
   is set to 4, since 4 elements can fit in a single vector register.

   We currently support vectorization of loops in which all types operated
   upon are of the same size.  Therefore this function currently sets VF
   according to the size of the types operated upon, and fails if there are
   multiple sizes in the loop.

   VF is also the factor by which the loop iterations are strip-mined, e.g.:
   original loop:
        for (i=0; i<N; i++){
          a[i] = b[i] + c[i];
        }

   vectorized loop:
        for (i=0; i<N; i+=VF){
          a[i:VF] = b[i:VF] + c[i:VF];
        }
*/
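/* Worked instance of the rule above (illustrative, assuming a 16-byte
   vector unit): for a loop operating on 4-byte ints,
   get_vectype_for_scalar_type returns a V4SI-like vectype,
   TYPE_VECTOR_SUBPARTS gives nunits = 16/4 = 4, and hence VF = 4: each
   vector iteration covers 4 scalar iterations.  */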
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  tree scalar_type;
  gimple phi;
  tree vectype;
  unsigned int nunits;
  stmt_vec_info stmt_info;
  int i;
  HOST_WIDE_INT dummy;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_determine_vectorization_factor ===");

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
              scalar_type = TREE_TYPE (PHI_RESULT (phi));

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;

              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "vectype: ");
                  print_generic_expr (vect_dump, vectype, TDF_SLIM);
                }

              nunits = TYPE_VECTOR_SUBPARTS (vectype);
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "nunits = %d", nunits);

              if (!vectorization_factor
                  || (nunits > vectorization_factor))
                vectorization_factor = nunits;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_info = vinfo_for_stmt (stmt);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "==> examining statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          gcc_assert (stmt_info);

          /* Skip stmts which do not need to be vectorized.  */
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "skip.");
              continue;
            }

          if (gimple_get_lhs (stmt) == NULL_TREE)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: irregular stmt.");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump, "not vectorized: vector stmt in loop:");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }
              return false;
            }

          if (STMT_VINFO_VECTYPE (stmt_info))
            {
              /* The only case when a vectype had been already set is for
                 stmts that contain a dataref, or for "pattern-stmts" (stmts
                 generated by the vectorizer to represent/replace a certain
                 idiom).  */
              gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
                          || is_pattern_stmt_p (stmt_info));
              vectype = STMT_VINFO_VECTYPE (stmt_info);
            }
          else
            {
              gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)
                          && !is_pattern_stmt_p (stmt_info));

              scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
                                                           &dummy);
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "get vectype for scalar type: ");
                  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                }

              vectype = get_vectype_for_scalar_type (scalar_type);
              if (!vectype)
                {
                  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                    {
                      fprintf (vect_dump,
                               "not vectorized: unsupported data-type ");
                      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
                    }
                  return false;
                }
              STMT_VINFO_VECTYPE (stmt_info) = vectype;
            }

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vectype: ");
              print_generic_expr (vect_dump, vectype, TDF_SLIM);
            }

          nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "nunits = %d", nunits);

          if (!vectorization_factor
              || (nunits > vectorization_factor))
            vectorization_factor = nunits;
        }
    }

  /* TODO: Analyze cost.  Decide if worthwhile to vectorize.  */
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "vectorization factor = %d", vectorization_factor);
  if (vectorization_factor <= 1)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported data-type");
      return false;
    }
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  return true;
}
/* Function vect_is_simple_iv_evolution.

   FORNOW: A simple evolution of an induction variable in the loop is
   considered a polynomial evolution with constant step.  */
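/* Illustration (assumed source, not from a real dump): for "i" in

     for (i=3; i<N; i+=2)

   the scalar evolution is the chrec {3, +, 2}_1, so
   initial_condition_in_loop_num yields 3 and evolution_part_in_loop_num
   yields 2 - an INTEGER_CST, hence a "simple" evolution.  A step that is
   an SSA_NAME rather than a constant is rejected by the INTEGER_CST check
   below.  */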
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree *init,
                             tree *step)
{
  tree init_expr;
  tree step_expr;
  tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);

  /* When there is no evolution in this loop, the evolution function
     is not "simple".  */
  if (evolution_part == NULL_TREE)
    return false;

  /* When the evolution is a polynomial of degree >= 2
     the evolution function is not "simple".  */
  if (tree_is_chrec (evolution_part))
    return false;

  step_expr = evolution_part;
  init_expr = unshare_expr (initial_condition_in_loop_num (access_fn,
                                                           loop_nb));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "step: ");
      print_generic_expr (vect_dump, step_expr, TDF_SLIM);
      fprintf (vect_dump, ", init: ");
      print_generic_expr (vect_dump, init_expr, TDF_SLIM);
    }

  *init = init_expr;
  *step = step_expr;

  if (TREE_CODE (step_expr) != INTEGER_CST)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "step unknown.");
      return false;
    }

  return true;
}
/* Function vect_analyze_scalar_cycles_1.

   Examine the cross iteration def-use cycles of scalar variables
   in LOOP.  LOOP_VINFO represents the loop that is now being
   considered for vectorization (can be LOOP, or an outer-loop
   enclosing LOOP).  */
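/* Illustration (assumed gimple shapes): given the loop-header phis

     i_1 = PHI <0 (preheader), i_2 (latch)>
     s_1 = PHI <s_0 (preheader), s_2 (latch)>

   the first walk below classifies i_1 as vect_induction_def because its
   evolution is the chrec {0, +, 1}; s_1 (say, with s_2 = s_1 + a[i]) has
   no simple evolution, so it is queued on the worklist and the second walk
   asks vect_is_simple_reduction whether it is a reduction, a nested cycle,
   or a double reduction.  */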
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
  basic_block bb = loop->header;
  tree dumy;
  VEC(gimple,heap) *worklist = VEC_alloc (gimple, heap, 64);
  gimple_stmt_iterator gsi;
  bool double_reduc;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_scalar_cycles ===");

  /* First - identify all inductions.  Reduction detection assumes that all
     the inductions have been identified, therefore, this order must not be
     changed.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple phi = gsi_stmt (gsi);
      tree access_fn = NULL;
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      /* Skip virtual phi's.  The data dependences that are associated with
         virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
      if (!is_gimple_reg (SSA_NAME_VAR (def)))
        continue;

      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;

      /* Analyze the evolution function.  */
      access_fn = analyze_scalar_evolution (loop, def);
      if (access_fn && vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Access function of PHI: ");
          print_generic_expr (vect_dump, access_fn, TDF_SLIM);
        }

      if (!access_fn
          || !vect_is_simple_iv_evolution (loop->num, access_fn, &dumy, &dumy))
        {
          VEC_safe_push (gimple, heap, worklist, phi);
          continue;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Detected induction.");
      STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
    }

  /* Second - identify all reductions and nested cycles.  */
  while (VEC_length (gimple, worklist) > 0)
    {
      gimple phi = VEC_pop (gimple, worklist);
      tree def = PHI_RESULT (phi);
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
      gimple reduc_stmt;
      bool nested_cycle;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Analyze phi: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
        }

      gcc_assert (is_gimple_reg (SSA_NAME_VAR (def)));
      gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);

      nested_cycle = (loop != LOOP_VINFO_LOOP (loop_vinfo));
      reduc_stmt = vect_is_simple_reduction (loop_vinfo, phi, !nested_cycle,
                                             &double_reduc);
      if (reduc_stmt)
        {
          if (double_reduc)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Detected double reduction.");

              STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
              STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                vect_double_reduction_def;
            }
          else
            {
              if (nested_cycle)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected vectorizable nested cycle.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_nested_cycle;
                }
              else
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "Detected reduction.");

                  STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
                  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
                    vect_reduction_def;
                }
            }
        }
      else
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Unknown def-use cycle pattern.");
    }

  VEC_free (gimple, heap, worklist);
}
/* Function vect_analyze_scalar_cycles.

   Examine the cross iteration def-use cycles of scalar variables, by
   analyzing the loop-header PHIs of scalar variables; Classify each
   cycle as one of the following: invariant, induction, reduction, unknown.
   We do that for the loop represented by LOOP_VINFO, and also for its
   inner-loop, if it exists.
   Examples for scalar cycles:

   Example1: reduction:

              loop1:
              for (i=0; i<N; i++)
                 sum += a[i];

   Example2: induction:

              loop2:
              for (i=0; i<N; i++)
                 a[i] = i;  */
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  vect_analyze_scalar_cycles_1 (loop_vinfo, loop);

  /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     Reductions in such inner-loop therefore have different properties than
     the reductions in the nest that gets vectorized:
     1. When vectorized, they are executed in the same order as in the
        original scalar loop, so we can't change the order of computation
        when vectorizing them.
     2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
        current checks are too strict.  */

  if (loop->inner)
    vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Function vect_get_loop_niters.

   Determine how many iterations the loop executes.
   If an expression that represents the number of iterations
   can be constructed, place it in NUMBER_OF_ITERATIONS.
   Return the loop exit condition.  */
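/* Illustration: for "for (i=0; i<N; i++)" the exit condition executes N
   times, so *NUMBER_OF_ITERATIONS becomes the (possibly symbolic)
   expression "N"; when scev cannot count the exits,
   number_of_exit_cond_executions returns chrec_dont_know and the output
   is left untouched.  */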
static gimple
vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
  tree niters;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== get_loop_niters ===");

  niters = number_of_exit_cond_executions (loop);

  if (niters != NULL_TREE
      && niters != chrec_dont_know)
    {
      *number_of_iterations = niters;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "==> get_loop_niters:");
          print_generic_expr (vect_dump, *number_of_iterations, TDF_SLIM);
        }
    }

  return get_loop_exit_condition (loop);
}
/* Function bb_in_loop_p

   Used as predicate for dfs order traversal of the loop bbs.  */

static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
  const struct loop *const loop = (const struct loop *)data;
  if (flow_bb_inside_loop_p (loop, bb))
    return true;
  return false;
}
/* Function new_loop_vec_info.

   Create and initialize a new loop_vec_info struct for LOOP, as well as
   stmt_vec_info structs for all the stmts in LOOP.  */

static loop_vec_info
new_loop_vec_info (struct loop *loop)
{
  loop_vec_info res;
  basic_block *bbs;
  gimple_stmt_iterator si;
  unsigned int i, nbbs;

  res = (loop_vec_info) xcalloc (1, sizeof (struct _loop_vec_info));
  LOOP_VINFO_LOOP (res) = loop;

  bbs = get_loop_body (loop);

  /* Create/Update stmt_info for all stmts in the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];

      /* BBs in a nested inner-loop will have been already processed (because
         we will have called vect_analyze_loop_form for any nested inner-loop).
         Therefore, for stmts in an inner-loop we just want to update the
         STMT_VINFO_LOOP_VINFO field of their stmt_info to point to the new
         loop_info of the outer-loop we are currently considering to vectorize
         (instead of the loop_info of the inner-loop).
         For stmts in other BBs we need to create a stmt_info from scratch.  */
      if (bb->loop_father != loop)
        {
          /* Inner-loop bb.  */
          gcc_assert (loop->inner && bb->loop_father == loop->inner);
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (phi);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
              loop_vec_info inner_loop_vinfo =
                STMT_VINFO_LOOP_VINFO (stmt_info);
              gcc_assert (loop->inner == LOOP_VINFO_LOOP (inner_loop_vinfo));
              STMT_VINFO_LOOP_VINFO (stmt_info) = res;
            }
        }
      else
        {
          /* bb in current nest.  */
          for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple phi = gsi_stmt (si);
              gimple_set_uid (phi, 0);
              set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, res, NULL));
            }

          for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
            {
              gimple stmt = gsi_stmt (si);
              gimple_set_uid (stmt, 0);
              set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res, NULL));
            }
        }
    }

  /* CHECKME: We want to visit all BBs before their successors (except for
     latch blocks, for which this assertion wouldn't hold).  In the simple
     case of the loop forms we allow, a dfs order of the BBs would be the
     same as a reversed postorder traversal, so we are safe.  */

  free (bbs);
  bbs = XCNEWVEC (basic_block, loop->num_nodes);
  nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
                             bbs, loop->num_nodes, loop);
  gcc_assert (nbbs == loop->num_nodes);

  LOOP_VINFO_BBS (res) = bbs;
  LOOP_VINFO_NITERS (res) = NULL;
  LOOP_VINFO_NITERS_UNCHANGED (res) = NULL;
  LOOP_VINFO_COST_MODEL_MIN_ITERS (res) = 0;
  LOOP_VINFO_VECTORIZABLE_P (res) = 0;
  LOOP_PEELING_FOR_ALIGNMENT (res) = 0;
  LOOP_VINFO_VECT_FACTOR (res) = 0;
  LOOP_VINFO_DATAREFS (res) = VEC_alloc (data_reference_p, heap, 10);
  LOOP_VINFO_DDRS (res) = VEC_alloc (ddr_p, heap, 10 * 10);
  LOOP_VINFO_UNALIGNED_DR (res) = NULL;
  LOOP_VINFO_MAY_MISALIGN_STMTS (res) =
    VEC_alloc (gimple, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS));
  LOOP_VINFO_MAY_ALIAS_DDRS (res) =
    VEC_alloc (ddr_p, heap,
               PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS));
  LOOP_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  LOOP_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 10);
  LOOP_VINFO_SLP_UNROLLING_FACTOR (res) = 1;

  return res;
}
/* Function destroy_loop_vec_info.

   Free LOOP_VINFO struct, as well as all the stmt_vec_info structs of all
   the stmts in the loop.  */

void
destroy_loop_vec_info (loop_vec_info loop_vinfo, bool clean_stmts)
{
  struct loop *loop;
  basic_block *bbs;
  int nbbs;
  gimple_stmt_iterator si;
  int j;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;

  if (!loop_vinfo)
    return;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  bbs = LOOP_VINFO_BBS (loop_vinfo);
  nbbs = loop->num_nodes;

  if (!clean_stmts)
    {
      free (LOOP_VINFO_BBS (loop_vinfo));
      free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
      free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
      VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));

      free (loop_vinfo);
      loop->aux = NULL;
      return;
    }

  for (j = 0; j < nbbs; j++)
    {
      basic_block bb = bbs[j];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        free_stmt_vec_info (gsi_stmt (si));

      for (si = gsi_start_bb (bb); !gsi_end_p (si); )
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (stmt_info)
            {
              /* Check if this is a "pattern stmt" (introduced by the
                 vectorizer during the pattern recognition pass).  */
              bool remove_stmt_p = false;
              gimple orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
              if (orig_stmt)
                {
                  stmt_vec_info orig_stmt_info = vinfo_for_stmt (orig_stmt);
                  if (orig_stmt_info
                      && STMT_VINFO_IN_PATTERN_P (orig_stmt_info))
                    remove_stmt_p = true;
                }

              /* Free stmt_vec_info.  */
              free_stmt_vec_info (stmt);

              /* Remove dead "pattern stmts".  */
              if (remove_stmt_p)
                {
                  gsi_remove (&si, true);
                  continue;
                }
            }
          gsi_next (&si);
        }
    }

  free (LOOP_VINFO_BBS (loop_vinfo));
  free_data_refs (LOOP_VINFO_DATAREFS (loop_vinfo));
  free_dependence_relations (LOOP_VINFO_DDRS (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo));
  VEC_free (ddr_p, heap, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo));
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (j = 0; VEC_iterate (slp_instance, slp_instances, j, instance); j++)
    vect_free_slp_instance (instance);

  VEC_free (slp_instance, heap, LOOP_VINFO_SLP_INSTANCES (loop_vinfo));
  VEC_free (gimple, heap, LOOP_VINFO_STRIDED_STORES (loop_vinfo));

  free (loop_vinfo);
  loop->aux = NULL;
}
/* Function vect_analyze_loop_1.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  This is a subset of the analyses applied in
   vect_analyze_loop, to be applied on an inner-loop nested in the loop
   that is now considered for (outer-loop) vectorization.  */

static loop_vec_info
vect_analyze_loop_1 (struct loop *loop)
{
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest_1 =====");

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad inner-loop form.");
      return NULL;
    }

  return loop_vinfo;
}
/* Function vect_analyze_loop_form.

   Verify that certain CFG restrictions hold, including:
   - the loop has a pre-header
   - the loop has a single entry and exit
   - the loop exit condition is simple enough, and the number of iterations
     can be analyzed (a countable loop).  */
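/* Illustration of the expected do-while shape, assuming the usual gimple
   lowering of a countable loop:

     header:
       ...loop body...
       if (i < N) goto latch; else goto exit;
     latch:
       goto header;

   All executable statements sit in the header and the latch is empty,
   which is what the checks below enforce.  */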
loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
  loop_vec_info loop_vinfo;
  gimple loop_cond;
  tree number_of_iterations = NULL;
  loop_vec_info inner_loop_vinfo = NULL;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_form ===");

  /* Different restrictions apply when we are considering an inner-most loop,
     vs. an outer (nested) loop.
     (FORNOW. May want to relax some of these restrictions in the future).  */

  if (!loop->inner)
    {
      /* Inner-most loop.  We currently require that the number of BBs is
         exactly 2 (the header and latch).  Vectorizable inner-most loops
         look like this:

                        (pre-header)
                           |
                          header <--------+
                           | |            |
                           | +--> latch --+
                           |
                        (exit-bb)  */

      if (loop->num_nodes != 2)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          return NULL;
        }

      if (empty_block_p (loop->header))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: empty loop.");
          return NULL;
        }
    }
  else
    {
      struct loop *innerloop = loop->inner;
      edge entryedge;

      /* Nested loop.  We currently require that the loop is doubly-nested,
         contains a single inner loop, and the number of BBs is exactly 5.
         Vectorizable outer-loops look like this:

                        (pre-header)
                           |
                          header <---+
                           |         |
                          inner-loop |
                           |         |
                          tail ------+
                           |
                        (exit-bb)

         The inner-loop has the properties expected of inner-most loops
         as described above.  */

      if ((loop->inner)->inner || (loop->inner)->next)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: multiple nested loops.");
          return NULL;
        }

      /* Analyze the inner-loop.  */
      inner_loop_vinfo = vect_analyze_loop_1 (loop->inner);
      if (!inner_loop_vinfo)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: Bad inner loop.");
          return NULL;
        }

      if (!expr_invariant_in_loop_p (loop,
                                     LOOP_VINFO_NITERS (inner_loop_vinfo)))
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump,
                     "not vectorized: inner-loop count not invariant.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (loop->num_nodes != 5)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: control flow in loop.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      gcc_assert (EDGE_COUNT (innerloop->header->preds) == 2);
      entryedge = EDGE_PRED (innerloop->header, 0);
      if (EDGE_PRED (innerloop->header, 0)->src == innerloop->latch)
        entryedge = EDGE_PRED (innerloop->header, 1);

      if (entryedge->src != loop->header
          || !single_exit (innerloop)
          || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: unsupported outerloop form.");
          destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Considering outer-loop vectorization.");
    }

  if (!single_exit (loop)
      || EDGE_COUNT (loop->header->preds) != 2)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        {
          if (!single_exit (loop))
            fprintf (vect_dump, "not vectorized: multiple exits.");
          else if (EDGE_COUNT (loop->header->preds) != 2)
            fprintf (vect_dump, "not vectorized: too many incoming edges.");
        }
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* We assume that the loop exit condition is at the end of the loop, i.e.,
     that the loop is represented as a do-while (with a proper if-guard
     before the loop if needed), where the loop header contains all the
     executable statements, and the latch is empty.  */
  if (!empty_block_p (loop->latch)
      || phi_nodes (loop->latch))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: unexpected loop form.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  /* Make sure there exists a single-predecessor exit bb:  */
  if (!single_pred_p (single_exit (loop)->dest))
    {
      edge e = single_exit (loop);
      if (!(e->flags & EDGE_ABNORMAL))
        {
          split_loop_exit_edge (e);
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "split exit edge.");
        }
      else
        {
          if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
            fprintf (vect_dump, "not vectorized: abnormal loop exit edge.");
          if (inner_loop_vinfo)
            destroy_loop_vec_info (inner_loop_vinfo, true);
          return NULL;
        }
    }

  loop_cond = vect_get_loop_niters (loop, &number_of_iterations);
  if (!loop_cond)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "not vectorized: complicated exit condition.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!number_of_iterations)
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump,
                 "not vectorized: number of iterations cannot be computed.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (chrec_contains_undetermined (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
        fprintf (vect_dump, "Infinite number of iterations.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, true);
      return NULL;
    }

  if (!NITERS_KNOWN_P (number_of_iterations))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "Symbolic number of iterations is ");
          print_generic_expr (vect_dump, number_of_iterations, TDF_DETAILS);
        }
    }
  else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: number of iterations = 0.");
      if (inner_loop_vinfo)
        destroy_loop_vec_info (inner_loop_vinfo, false);
      return NULL;
    }

  loop_vinfo = new_loop_vec_info (loop);
  LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
  LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;

  STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;

  /* CHECKME: May want to keep it around in the future.  */
  if (inner_loop_vinfo)
    destroy_loop_vec_info (inner_loop_vinfo, false);

  gcc_assert (!loop->aux);
  loop->aux = loop_vinfo;
  return loop_vinfo;
}
/* Function vect_analyze_loop_operations.

   Scan the loop stmts and make sure they are all vectorizable.  */

static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int vectorization_factor = 0;
  int i;
  gimple phi;
  stmt_vec_info stmt_info;
  bool need_to_vectorize = false;
  int min_profitable_iters;
  int min_scalar_loop_bound;
  unsigned int th;
  bool only_slp_in_loop = true, ok;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_analyze_loop_operations ===");

  gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
  vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          ok = true;

          stmt_info = vinfo_for_stmt (phi);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "examining phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (! is_loop_header_bb_p (bb))
            {
              /* inner-loop loop-closed exit phi in outer-loop vectorization
                 (i.e. a phi in the tail of the outer-loop).
                 FORNOW: we currently don't support the case that these phis
                 are not used in the outerloop (unless it is double reduction,
                 i.e., this phi is vect_reduction_def), cause this case
                 requires to actually do something here.  */
              if ((!STMT_VINFO_RELEVANT_P (stmt_info)
                   || STMT_VINFO_LIVE_P (stmt_info))
                  && STMT_VINFO_DEF_TYPE (stmt_info)
                     != vect_double_reduction_def)
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump,
                             "Unsupported loop-closed phi in outer-loop.");
                  return false;
                }
              continue;
            }

          gcc_assert (stmt_info);

          if (STMT_VINFO_LIVE_P (stmt_info))
            {
              /* FORNOW: not yet supported.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump, "not vectorized: value used after loop.");
              return false;
            }

          if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
              && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
            {
              /* A scalar-dependence cycle that we don't support.  */
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                fprintf (vect_dump,
                         "not vectorized: scalar dependence cycle.");
              return false;
            }

          if (STMT_VINFO_RELEVANT_P (stmt_info))
            {
              need_to_vectorize = true;
              if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
                ok = vectorizable_induction (phi, NULL, NULL);
            }

          if (!ok)
            {
              if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
                {
                  fprintf (vect_dump,
                           "not vectorized: relevant phi not supported: ");
                  print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
                }
              return false;
            }
        }

      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          gimple stmt = gsi_stmt (si);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          gcc_assert (stmt_info);

          if (!vect_analyze_stmt (stmt, &need_to_vectorize, NULL))
            return false;

          if (STMT_VINFO_RELEVANT_P (stmt_info) && !PURE_SLP_STMT (stmt_info))
            /* STMT needs both SLP and loop-based vectorization.  */
            only_slp_in_loop = false;
        }
    } /* bbs */

  /* All operations in the loop are either irrelevant (deal with loop
     control, or dead), or only used outside the loop and can be moved
     out of the loop (e.g. invariants, inductions).  The loop can be
     optimized away by scalar optimizations.  We're better off not
     touching this loop.  */
  if (!need_to_vectorize)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump,
                 "All the computation can be taken out of the loop.");
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump,
                 "not vectorized: redundant loop. no profit to vectorize.");
      return false;
    }

  /* If all the stmts in the loop can be SLPed, we perform only SLP, and
     vectorization factor of the loop is the unrolling factor required by the
     SLP instances.  If that unrolling factor is 1, we say, that we perform
     pure SLP on loop - cross iteration parallelism is not exploited.  */
  if (only_slp_in_loop)
    vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
  else
    vectorization_factor = least_common_multiple (vectorization_factor,
                                LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
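  /* Illustration: with a loop vectorization factor of 4 and an SLP
     instance whose unrolling factor is 2, least_common_multiple yields 4;
     factors of 4 and 3 would combine to 12, i.e. each vector iteration
     would then cover 12 scalar iterations.  */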
  LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump,
             "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC,
             vectorization_factor, LOOP_VINFO_INT_NITERS (loop_vinfo));

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && (LOOP_VINFO_INT_NITERS (loop_vinfo) < vectorization_factor))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: iteration count too small.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "vectorization factor.");
      return false;
    }

  /* Analyze cost.  Decide if worthwhile to vectorize.  */

  /* Once VF is set, SLP costs should be updated since the number of created
     vector stmts depends on VF.  */
  vect_update_slp_costs_according_to_vf (loop_vinfo);

  min_profitable_iters = vect_estimate_min_profitable_iters (loop_vinfo);
  LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo) = min_profitable_iters;

  if (min_profitable_iters < 0)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: vector version will never be "
                 "profitable.");
      return false;
    }

  min_scalar_loop_bound = ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
                            * vectorization_factor) - 1);

  /* Use the cost model only if it is more conservative than user specified
     threshold.  */

  th = (unsigned) min_scalar_loop_bound;
  if (min_profitable_iters
      && (!min_scalar_loop_bound
          || min_profitable_iters > min_scalar_loop_bound))
    th = (unsigned) min_profitable_iters;

  if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      && LOOP_VINFO_INT_NITERS (loop_vinfo) <= th)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: vectorization not "
                 "profitable.");
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not vectorized: iteration count smaller than "
                 "user specified loop bound parameter or minimum "
                 "profitable iterations (whichever is more conservative).");
      return false;
    }

  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0
      || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "epilog loop required.");
      if (!vect_can_advance_ivs_p (loop_vinfo))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 1.");
          return false;
        }
      if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
        {
          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
            fprintf (vect_dump,
                     "not vectorized: can't create epilog loop 2.");
          return false;
        }
    }

  return true;
}
/* Function vect_analyze_loop.

   Apply a set of analyses on LOOP, and create a loop_vec_info struct
   for it.  The different analyses will record information in the
   loop_vec_info struct.  */

loop_vec_info
vect_analyze_loop (struct loop *loop)
{
  bool ok;
  loop_vec_info loop_vinfo;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===== analyze_loop_nest =====");

  if (loop_outer (loop)
      && loop_vec_info_for_loop (loop_outer (loop))
      && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop already vectorized.");
      return NULL;
    }

  /* Check the CFG characteristics of the loop (nesting, entry/exit, etc.).  */

  loop_vinfo = vect_analyze_loop_form (loop);
  if (!loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad loop form.");
      return NULL;
    }

  /* Find all data references in the loop (which correspond to vdefs/vuses)
     and analyze their evolution in the loop.

     FORNOW: Handle only simple array references whose alignment can be
     forced, and aligned pointer-references.  */

  ok = vect_analyze_data_refs (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data references.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Classify all cross-iteration scalar data-flow cycles.
     Cross-iteration cycles caused by virtual phis are analyzed separately.  */

  vect_analyze_scalar_cycles (loop_vinfo);

  vect_pattern_recog (loop_vinfo);

  /* Data-flow analysis to detect stmts that do not need to be vectorized.  */

  ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unexpected pattern.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the alignment of the data-refs in the loop.
     Fail if a data reference is found that cannot be vectorized.  */

  ok = vect_analyze_data_refs_alignment (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  ok = vect_determine_vectorization_factor (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "can't determine vectorization factor.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze data dependences between the data-refs in the loop.
     FORNOW: fail at the first data dependence that we encounter.  */

  ok = vect_analyze_data_ref_dependences (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data dependence.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Analyze the access patterns of the data-refs in the loop (consecutive,
     complex, etc.).  FORNOW: Only handle consecutive access pattern.  */

  ok = vect_analyze_data_ref_accesses (loop_vinfo, NULL);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data access.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Prune the list of ddrs to be tested at run-time by versioning for alias.
     It is important to call pruning after vect_analyze_data_ref_accesses,
     since we use grouping information gathered by interleaving analysis.  */
  ok = vect_prune_runtime_alias_test_list (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "too long list of versioning for alias "
                 "run-time tests.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
  ok = vect_analyze_slp (loop_vinfo, NULL);
  if (ok)
    {
      /* Decide which possible SLP instances to SLP.  */
      vect_make_slp_decision (loop_vinfo);

      /* Find stmts that need to be both vectorized and SLPed.  */
      vect_detect_hybrid_slp (loop_vinfo);
    }

  /* This pass will decide on using loop versioning and/or loop peeling in
     order to enhance the alignment of data references in the loop.  */

  ok = vect_enhance_data_refs_alignment (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad data alignment.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  /* Scan all the operations in the loop and make sure they are
     vectorizable.  */

  ok = vect_analyze_loop_operations (loop_vinfo);
  if (!ok)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "bad operation or unsupported loop bound.");
      destroy_loop_vec_info (loop_vinfo, true);
      return NULL;
    }

  LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;

  return loop_vinfo;
}
/* Function reduction_code_for_scalar_code

   Input:
   CODE - tree_code of a reduction operation.

   Output:
   REDUC_CODE - the corresponding tree-code to be used to reduce the
      vector of partial results into a single scalar result (which
      will also reside in a vector) or ERROR_MARK if the operation is
      a supported reduction operation, but does not have such tree-code.

   Return FALSE if CODE currently cannot be vectorized as reduction.  */
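/* Illustration: for "s = s + a[i]" CODE is PLUS_EXPR and *REDUC_CODE
   becomes REDUC_PLUS_EXPR, which reduces a whole vector of partial sums
   in one operation when the target supports it; for an operation like
   MULT_EXPR no such tree-code exists, so *REDUC_CODE is set to ERROR_MARK
   and the reduction epilogue must be generated by other means.  */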
static bool
reduction_code_for_scalar_code (enum tree_code code,
                                enum tree_code *reduc_code)
{
  switch (code)
    {
      case MAX_EXPR:
        *reduc_code = REDUC_MAX_EXPR;
        return true;

      case MIN_EXPR:
        *reduc_code = REDUC_MIN_EXPR;
        return true;

      case PLUS_EXPR:
        *reduc_code = REDUC_PLUS_EXPR;
        return true;

      case MULT_EXPR:
      case MINUS_EXPR:
      case BIT_IOR_EXPR:
      case BIT_XOR_EXPR:
      case BIT_AND_EXPR:
        *reduc_code = ERROR_MARK;
        return true;

      default:
        return false;
    }
}


/* Error reporting helper for vect_is_simple_reduction below.  GIMPLE
   statement STMT is printed with a message MSG.  */

static void
report_vect_op (gimple stmt, const char *msg)
{
  fprintf (vect_dump, "%s", msg);
  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
}
/* Function vect_is_simple_reduction

   (1) Detect a cross-iteration def-use cycle that represents a simple
   reduction computation.  We look for the following pattern:

   loop_header:
     a1 = phi < a0, a2 >
     a3 = ...
     a2 = operation (a3, a1)

   such that:
   1. operation is commutative and associative and it is safe to
      change the order of the computation (if CHECK_REDUCTION is true)
   2. no uses for a2 in the loop (a2 is used out of the loop)
   3. no uses of a1 in the loop besides the reduction operation.

   Condition 1 is tested here.
   Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.

   (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
   nested cycles, if CHECK_REDUCTION is false.

   (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
   reductions:

   a1 = phi < a0, a2 >
   inner loop (def of a3)
   a2 = phi < a3 >  */
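/* Illustration (source-level view of pattern (1), assuming
   -fassociative-math so the reordering checks below pass for floats):

     float s = 0;
     for (i=0; i<N; i++)
       s = s + a[i];

   Here a1 is the loop-header phi for s, a3 is the loaded a[i], and
   a2 = a1 + a3 is the reduction statement; s is used only after the loop,
   so the cycle is detected as a reduction.  */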
gimple
vect_is_simple_reduction (loop_vec_info loop_info, gimple phi,
                          bool check_reduction, bool *double_reduc)
{
  struct loop *loop = (gimple_bb (phi))->loop_father;
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  edge latch_e = loop_latch_edge (loop);
  tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
  gimple def_stmt, def1 = NULL, def2 = NULL;
  enum tree_code code;
  tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
  tree type;
  int nloop_uses;
  tree name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool phi_def;

  *double_reduc = false;

  /* If CHECK_REDUCTION is true, we assume inner-most loop vectorization,
     otherwise, we assume outer loop vectorization.  */
  gcc_assert ((check_reduction && loop == vect_loop)
              || (!check_reduction && flow_loop_nested_p (vect_loop, loop)));

  name = PHI_RESULT (phi);
  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  if (TREE_CODE (loop_arg) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: not ssa_name: ");
          print_generic_expr (vect_dump, loop_arg, TDF_SLIM);
        }
      return NULL;
    }

  def_stmt = SSA_NAME_DEF_STMT (loop_arg);
  if (!def_stmt)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduction: no def_stmt.");
      return NULL;
    }

  if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
      return NULL;
    }

  if (is_gimple_assign (def_stmt))
    {
      name = gimple_assign_lhs (def_stmt);
      phi_def = false;
    }
  else
    {
      name = PHI_RESULT (def_stmt);
      phi_def = true;
    }

  nloop_uses = 0;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
        continue;
      if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
          && vinfo_for_stmt (use_stmt)
          && !is_pattern_stmt_p (vinfo_for_stmt (use_stmt)))
        nloop_uses++;
      if (nloop_uses > 1)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "reduction used in loop.");
          return NULL;
        }
    }

  /* If DEF_STMT is a phi node itself, we expect it to have a single argument
     defined in the inner loop.  */
  if (phi_def)
    {
      op1 = PHI_ARG_DEF (def_stmt, 0);

      if (gimple_phi_num_args (def_stmt) != 1
          || TREE_CODE (op1) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "unsupported phi node definition.");
          return NULL;
        }

      def1 = SSA_NAME_DEF_STMT (op1);
      if (flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && loop->inner
          && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
          && is_gimple_assign (def1))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected double reduction: ");

          *double_reduc = true;
          return def_stmt;
        }

      return NULL;
    }

  code = gimple_assign_rhs_code (def_stmt);

  if (check_reduction
      && (!commutative_tree_code (code) || !associative_tree_code (code)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: not commutative/associative: ");
      return NULL;
    }

  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS)
    {
      if (code != COND_EXPR)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: not binary operation: ");
          return NULL;
        }

      op3 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
      if (COMPARISON_CLASS_P (op3))
        {
          op4 = TREE_OPERAND (op3, 1);
          op3 = TREE_OPERAND (op3, 0);
        }

      op1 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 1);
      op2 = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 2);

      if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
          return NULL;
        }
    }
  else
    {
      op1 = gimple_assign_rhs1 (def_stmt);
      op2 = gimple_assign_rhs2 (def_stmt);

      if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "reduction: uses not ssa_names: ");
          return NULL;
        }
    }

  type = TREE_TYPE (gimple_assign_lhs (def_stmt));
  if ((TREE_CODE (op1) == SSA_NAME
       && !types_compatible_p (type, TREE_TYPE (op1)))
      || (TREE_CODE (op2) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op2)))
      || (op3 && TREE_CODE (op3) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op3)))
      || (op4 && TREE_CODE (op4) == SSA_NAME
          && !types_compatible_p (type, TREE_TYPE (op4))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "reduction: multiple types: operation type: ");
          print_generic_expr (vect_dump, type, TDF_SLIM);
          fprintf (vect_dump, ", operands types: ");
          print_generic_expr (vect_dump, TREE_TYPE (op1), TDF_SLIM);
          fprintf (vect_dump, ",");
          print_generic_expr (vect_dump, TREE_TYPE (op2), TDF_SLIM);
          if (op3)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op3), TDF_SLIM);
            }
          if (op4)
            {
              fprintf (vect_dump, ",");
              print_generic_expr (vect_dump, TREE_TYPE (op4), TDF_SLIM);
            }
        }

      return NULL;
    }

  /* Check that it's ok to change the order of the computation.
     Generally, when vectorizing a reduction we change the order of the
     computation.  This may change the behavior of the program in some
     cases, so we need to check that this is ok.  One exception is when
     vectorizing an outer-loop: the inner-loop is executed sequentially,
     and therefore vectorizing reductions in the inner-loop during
     outer-loop vectorization is safe.  */

  /* CHECKME: check for !flag_finite_math_only too?  */
  if (SCALAR_FLOAT_TYPE_P (type) && !flag_associative_math
      && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe fp math optimization: ");
      return NULL;
    }
  else if (INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type)
           && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unsafe int math optimization: ");
      return NULL;
    }
  else if (SAT_FIXED_POINT_TYPE_P (type) && check_reduction)
    {
      /* Changing the order of operations changes the semantics.  */
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt,
                        "reduction: unsafe fixed-point math optimization: ");
      return NULL;
    }

  /* Reduction is safe.  We're dealing with one of the following:
     1) integer arithmetic and no trapv
     2) floating point arithmetic, and special flags permit this optimization
     3) nested cycle (i.e., outer loop vectorization).  */
  if (TREE_CODE (op1) == SSA_NAME)
    def1 = SSA_NAME_DEF_STMT (op1);

  if (TREE_CODE (op2) == SSA_NAME)
    def2 = SSA_NAME_DEF_STMT (op2);

  if (code != COND_EXPR
      && (!def1 || !def2 || gimple_nop_p (def1) || gimple_nop_p (def2)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: no defs for operands: ");
      return NULL;
    }

  /* Check that one def is the reduction def, defined by PHI,
     the other def is either defined in the loop ("vect_internal_def"),
     or it's an induction (defined by a loop-header phi-node).  */

  if (def2 && def2 == phi
      && (code == COND_EXPR
          || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
              && (is_gimple_assign (def1)
                  || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                     == vect_induction_def
                  || (gimple_code (def1) == GIMPLE_PHI
                      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
                         == vect_internal_def
                      && !is_loop_header_bb_p (gimple_bb (def1)))))))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "detected reduction: ");
      return def_stmt;
    }
  else if (def1 && def1 == phi
           && (code == COND_EXPR
               || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
                   && (is_gimple_assign (def2)
                       || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                          == vect_induction_def
                       || (gimple_code (def2) == GIMPLE_PHI
                           && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
                              == vect_internal_def
                           && !is_loop_header_bb_p (gimple_bb (def2)))))))
    {
      if (check_reduction)
        {
          /* Swap operands (just for simplicity - so that the rest of the code
             can assume that the reduction variable is always the last (second)
             argument).  */
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt,
                            "detected reduction: need to swap operands: ");

          swap_tree_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
                              gimple_assign_rhs2_ptr (def_stmt));
        }
      else
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            report_vect_op (def_stmt, "detected reduction: ");
        }

      return def_stmt;
    }
  else
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        report_vect_op (def_stmt, "reduction: unknown pattern: ");

      return NULL;
    }
}
/* Function vect_estimate_min_profitable_iters

   Return the number of iterations required for the vector version of the
   loop to be profitable relative to the cost of the scalar version of the
   loop.

   TODO: Take profile info into account before making vectorization
   decisions, if available.  */
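/* Illustration of the break-even computation this function performs, in
   simplified form (costs in abstract units; SIC = scalar single iteration
   cost, VIC = vector inside-of-loop cost, VOC = vector outside-of-loop
   cost, VF = vectorization factor):

     scalar cost of n iterations:  SIC * n
     vector cost:                  VOC + (n / VF) * VIC

   so the vector version wins when

     n > (VOC * VF) / (SIC * VF - VIC)

   before the prologue/epilogue and guard adjustments accounted for
   below.  */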
1896 vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo
)
1899 int min_profitable_iters
;
1900 int peel_iters_prologue
;
1901 int peel_iters_epilogue
;
1902 int vec_inside_cost
= 0;
1903 int vec_outside_cost
= 0;
1904 int scalar_single_iter_cost
= 0;
1905 int scalar_outside_cost
= 0;
1906 int vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
1907 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1908 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
1909 int nbbs
= loop
->num_nodes
;
1910 int byte_misalign
= LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo
);
1911 int peel_guard_costs
= 0;
1912 int innerloop_iters
= 0, factor
;
1913 VEC (slp_instance
, heap
) *slp_instances
;
1914 slp_instance instance
;
1916 /* Cost model disabled. */
1917 if (!flag_vect_cost_model
)
1919 if (vect_print_dump_info (REPORT_COST
))
1920 fprintf (vect_dump
, "cost model disabled.");
1924 /* Requires loop versioning tests to handle misalignment. */
1925 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo
))
1927 /* FIXME: Make cost depend on complexity of individual check. */
1929 VEC_length (gimple
, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
));
1930 if (vect_print_dump_info (REPORT_COST
))
1931 fprintf (vect_dump
, "cost model: Adding cost of checks for loop "
1932 "versioning to treat misalignment.\n");
1935 /* Requires loop versioning with alias checks. */
1936 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo
))
1938 /* FIXME: Make cost depend on complexity of individual check. */
1940 VEC_length (ddr_p
, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
));
1941 if (vect_print_dump_info (REPORT_COST
))
1942 fprintf (vect_dump
, "cost model: Adding cost of checks for loop "
1943 "versioning aliasing.\n");
1946 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo
)
1947 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo
))
1948 vec_outside_cost
+= TARG_COND_TAKEN_BRANCH_COST
;
1950 /* Count statements in scalar loop. Using this as scalar cost for a single
1953 TODO: Add outer loop support.
1955 TODO: Consider assigning different costs to different scalar
1960 innerloop_iters
= 50; /* FIXME */
1962 for (i
= 0; i
< nbbs
; i
++)
1964 gimple_stmt_iterator si
;
1965 basic_block bb
= bbs
[i
];
1967 if (bb
->loop_father
== loop
->inner
)
1968 factor
= innerloop_iters
;
1972 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
1974 gimple stmt
= gsi_stmt (si
);
1975 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1976 /* Skip stmts that are not vectorized inside the loop. */
1977 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
1978 && (!STMT_VINFO_LIVE_P (stmt_info
)
1979 || STMT_VINFO_DEF_TYPE (stmt_info
) != vect_reduction_def
))
1981 scalar_single_iter_cost
+= cost_for_stmt (stmt
) * factor
;
1982 vec_inside_cost
+= STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info
) * factor
;
1983 /* FIXME: for stmts in the inner-loop in outer-loop vectorization,
1984 some of the "outside" costs are generated inside the outer-loop. */
1985 vec_outside_cost
+= STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info
);
1989 /* Add additional cost for the peeled instructions in prologue and epilogue
1992 FORNOW: If we don't know the value of peel_iters for prologue or epilogue
1993 at compile-time - we assume it's vf/2 (the worst would be vf-1).
1995 TODO: Build an expression that represents peel_iters for prologue and
1996 epilogue to be used in a run-time test. */
  if (byte_misalign < 0)
    {
      peel_iters_prologue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "prologue peel iters set to vf/2.");

      /* If peeling for alignment is unknown, loop bound of main loop becomes
         unknown.  */
      peel_iters_epilogue = vf/2;
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: "
                 "epilogue peel iters set to vf/2 because "
                 "peeling for alignment is unknown.");

      /* If peeled iterations are unknown, count a taken branch and a not
         taken branch per peeled loop.  Even if scalar loop iterations are
         known, vector iterations are not known since peeled prologue
         iterations are not known.  Hence guards remain the same.  */
      peel_guard_costs += 2 * (TARG_COND_TAKEN_BRANCH_COST
                               + TARG_COND_NOT_TAKEN_BRANCH_COST);
    }
  else
    {
      if (byte_misalign)
        {
          struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
          int element_size
            = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr))));
          tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
          int nelements = TYPE_VECTOR_SUBPARTS (vectype);

          peel_iters_prologue = nelements - (byte_misalign / element_size);
        }
      else
        peel_iters_prologue = 0;

      if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
        {
          peel_iters_epilogue = vf/2;
          if (vect_print_dump_info (REPORT_COST))
            fprintf (vect_dump, "cost model: "
                     "epilogue peel iters set to vf/2 because "
                     "loop iterations are unknown.");

          /* If peeled iterations are known but number of scalar loop
             iterations are unknown, count a taken branch per peeled loop.  */
          peel_guard_costs += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
      else
        {
          int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
          peel_iters_prologue = niters < peel_iters_prologue ?
                                niters : peel_iters_prologue;
          peel_iters_epilogue = (niters - peel_iters_prologue) % vf;
        }
    }

  vec_outside_cost += (peel_iters_prologue * scalar_single_iter_cost)
                      + (peel_iters_epilogue * scalar_single_iter_cost)
                      + peel_guard_costs;
  /* FORNOW: The scalar outside cost is incremented in one of the
     following ways:

     1. The vectorizer checks for alignment and aliasing and generates
     a condition that allows dynamic vectorization.  A cost model
     check is ANDED with the versioning condition.  Hence scalar code
     path now has the added cost of the versioning check.

       if (cost > th & versioning_check)
         jmp to vector code

     Hence run-time scalar is incremented by not-taken branch cost.

     2. The vectorizer then checks if a prologue is required.  If the
     cost model check was not done before during versioning, it has to
     be done before the prologue check.

       if (cost <= th)
         prologue = scalar_iters
       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit

     Hence the run-time scalar cost is incremented by a taken branch,
     plus a not-taken branch, plus a taken branch cost.

     3. The vectorizer then checks if an epilogue is required.  If the
     cost model check was not done before during prologue check, it
     has to be done with the epilogue check.

       if (prologue == 0)
         jmp to vector code
       else
         execute prologue
       if (prologue == num_iters)
         go to exit
       if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
         jmp to epilogue
       else
         execute vector code

     Hence the run-time scalar cost should be incremented by 2 taken
     branches.

     TODO: The back end may reorder the BBS's differently and reverse
     conditions/branch directions.  Change the estimates below to
     something more reasonable.  */
  /* If the number of iterations is known and we do not do versioning, we can
     decide whether to vectorize at compile time.  Hence the scalar version
     does not carry cost model guard costs.  */
  if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    {
      /* Cost model check occurs at versioning.  */
      if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
          || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
        scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
      else
        {
          /* Cost model check occurs at prologue generation.  */
          if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST
                                   + TARG_COND_NOT_TAKEN_BRANCH_COST;
          /* Cost model check occurs at epilogue generation.  */
          else
            scalar_outside_cost += 2 * TARG_COND_TAKEN_BRANCH_COST;
        }
    }

  /* Add SLP costs.  */
  slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      vec_outside_cost += SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (instance);
      vec_inside_cost += SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance);
    }

  /* Calculate number of iterations required to make the vector version
     profitable, relative to the loop bodies only.  The following condition
     must hold true:
     SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
     where
     SIC = scalar iteration cost, VIC = vector iteration cost,
     VOC = vector outside cost, VF = vectorization factor,
     PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations,
     SOC = scalar outside cost for run time cost model check.  */
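
  /* A worked instance of the condition above (hypothetical costs):
     with SIC = 4, VIC = 6, VOC = 14, SOC = 0, VF = 4 and no peeling,
     the computation below yields
         min_profitable_iters = (14 * 4) / (4 * 4 - 6) = 56 / 10 = 5,
     and the rounding check bumps it to 6: only for niters >= 6 does
     the vector version win.  */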
  if ((scalar_single_iter_cost * vf) > vec_inside_cost)
    {
      if (vec_outside_cost <= 0)
        min_profitable_iters = 1;
      else
        {
          min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
                                  * vf
                                  - vec_inside_cost * peel_iters_prologue
                                  - vec_inside_cost * peel_iters_epilogue)
                                 / ((scalar_single_iter_cost * vf)
                                    - vec_inside_cost);

          if ((scalar_single_iter_cost * vf * min_profitable_iters)
              <= ((vec_inside_cost * min_profitable_iters)
                  + ((vec_outside_cost - scalar_outside_cost) * vf)))
            min_profitable_iters++;
        }
    }
  /* vector version will never be profitable.  */
  else
    {
      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "cost model: vector iteration cost = %d "
                 "is divisible by scalar iteration cost = %d by a factor "
                 "greater than or equal to the vectorization factor = %d.",
                 vec_inside_cost, scalar_single_iter_cost, vf);
      return -1;
    }

  if (vect_print_dump_info (REPORT_COST))
    {
      fprintf (vect_dump, "Cost model analysis: \n");
      fprintf (vect_dump, "  Vector inside of loop cost: %d\n",
               vec_inside_cost);
      fprintf (vect_dump, "  Vector outside of loop cost: %d\n",
               vec_outside_cost);
      fprintf (vect_dump, "  Scalar iteration cost: %d\n",
               scalar_single_iter_cost);
      fprintf (vect_dump, "  Scalar outside cost: %d\n", scalar_outside_cost);
      fprintf (vect_dump, "  prologue iterations: %d\n",
               peel_iters_prologue);
      fprintf (vect_dump, "  epilogue iterations: %d\n",
               peel_iters_epilogue);
      fprintf (vect_dump, "  Calculated minimum iters for profitability: %d\n",
               min_profitable_iters);
    }

  min_profitable_iters =
        min_profitable_iters < vf ? vf : min_profitable_iters;

  /* Because the condition we create is:
     if (niters <= min_profitable_iters)
       then skip the vectorized loop.  */
  min_profitable_iters--;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "  Profitability threshold = %d\n",
             min_profitable_iters);

  return min_profitable_iters;
}
/* TODO: Close dependency between vect_model_*_cost and vectorizable_*
   functions.  Design better to avoid maintenance issues.  */

/* Function vect_model_reduction_cost.

   Models cost for a reduction operation, including the vector ops
   generated within the strip-mine loop, the initial definition before
   the loop, and the epilogue code that must be generated.  */

static bool
vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
                           int ncopies)
{
  int outer_cost = 0;
  enum tree_code code;
  optab optab;
  tree vectype;
  tree reduction_op;
  gimple stmt, orig_stmt;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Cost of reduction op inside loop.  */
  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) += ncopies * TARG_VEC_STMT_COST;

  stmt = STMT_VINFO_STMT (stmt_info);

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
                  == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), 2);
      break;

    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;

    case GIMPLE_BINARY_RHS:
      reduction_op = gimple_assign_rhs2 (stmt);
      break;

    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_COST))
        {
          fprintf (vect_dump, "unsupported data-type ");
          print_generic_expr (vect_dump, TREE_TYPE (reduction_op), TDF_SLIM);
        }
      return false;
    }

  mode = TYPE_MODE (vectype);
  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

  if (!orig_stmt)
    orig_stmt = STMT_VINFO_STMT (stmt_info);

  code = gimple_assign_rhs_code (orig_stmt);

  /* Add in cost for initial definition.  */
  outer_cost += TARG_SCALAR_TO_VEC_COST;

  /* Determine cost of epilogue code.

     We have a reduction operator that will reduce the vector in one
     statement.  Also requires scalar extract.  */

  if (!nested_in_vect_loop_p (loop, orig_stmt))
    {
      if (reduc_code != ERROR_MARK)
        outer_cost += TARG_VEC_STMT_COST + TARG_VEC_TO_SCALAR_COST;
      else
        {
          int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          tree bitsize =
            TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
          int element_bitsize = tree_low_cst (bitsize, 1);
          int nelements = vec_size_in_bits / element_bitsize;

          optab = optab_for_tree_code (code, vectype, optab_default);

          /* We have a whole vector shift available.  */
          if (VECTOR_MODE_P (mode)
              && optab_handler (optab, mode)->insn_code != CODE_FOR_nothing
              && optab_handler (vec_shr_optab, mode)->insn_code
                 != CODE_FOR_nothing)
            /* Final reduction via vector shifts and the reduction operator.
               Also requires scalar extract.  */
            outer_cost += ((exact_log2 (nelements) * 2) * TARG_VEC_STMT_COST
                           + TARG_VEC_TO_SCALAR_COST);
          else
            /* Use extracts and reduction op for final reduction.  For N
               elements, we have N extracts and N-1 reduction ops.  */
            outer_cost += ((nelements + nelements - 1) * TARG_VEC_STMT_COST);
        }
    }

  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = outer_cost;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_reduction_cost: inside_cost = %d, "
             "outside_cost = %d.", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));

  return true;
}
/* Function vect_model_induction_cost.

   Models cost for induction operations.  */

static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
  /* loop cost for vec_loop.  */
  STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = ncopies * TARG_VEC_STMT_COST;
  /* prologue cost for vec_init and vec_step.  */
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = 2 * TARG_SCALAR_TO_VEC_COST;

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_induction_cost: inside_cost = %d, "
             "outside_cost = %d.", STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info),
             STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info));
}
/* Function get_initial_def_for_induction

   Input:
   STMT - a stmt that performs an induction operation in the loop.
   IV_PHI - the initial value of the induction variable

   Output:
   Return a vector variable, initialized with the first VF values of
   the induction variable.  E.g., for an iv with IV_PHI='X' and
   evolution S, for a vector of 4 units, we want to return:
   [X, X + S, X + 2*S, X + 3*S].  */
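
/* Concretely (illustrative): for X = 3, S = 2 and a 4-unit vector this
   returns vec_init = [3, 5, 7, 9]; the update inside the loop then uses
   vec_step = [VF*S, VF*S, VF*S, VF*S] = [8, 8, 8, 8].  */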
static tree
get_initial_def_for_induction (gimple iv_phi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (iv_phi);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (gimple_phi_result (iv_phi));
  tree vectype;
  int nunits;
  edge pe = loop_preheader_edge (loop);
  struct loop *iv_loop;
  basic_block new_bb;
  tree vec, vec_init, vec_step, t;
  tree access_fn;
  tree new_var;
  tree new_name;
  gimple init_stmt, induction_phi, new_stmt;
  tree induc_def, vec_def, vec_dest;
  tree init_expr, step_expr;
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  int i;
  bool ok;
  int ncopies;
  tree expr;
  stmt_vec_info phi_info = vinfo_for_stmt (iv_phi);
  bool nested_in_vect_loop = false;
  gimple_seq stmts = NULL;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  gimple exit_phi;
  edge latch_e;
  tree loop_arg;
  gimple_stmt_iterator si;
  basic_block bb = gimple_bb (iv_phi);
  tree stepvectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  ncopies = vf / nunits;

  gcc_assert (phi_info);
  gcc_assert (ncopies >= 1);

  /* Find the first insertion point in the BB.  */
  si = gsi_after_labels (bb);

  if (INTEGRAL_TYPE_P (scalar_type))
    step_expr = build_int_cst (scalar_type, 0);
  else if (POINTER_TYPE_P (scalar_type))
    step_expr = build_int_cst (sizetype, 0);
  else
    step_expr = build_real (scalar_type, dconst0);

  /* Is phi in an inner-loop, while vectorizing an enclosing outer-loop?  */
  if (nested_in_vect_loop_p (loop, iv_phi))
    {
      nested_in_vect_loop = true;
      iv_loop = loop->inner;
    }
  else
    iv_loop = loop;
  gcc_assert (iv_loop == (gimple_bb (iv_phi))->loop_father);

  latch_e = loop_latch_edge (iv_loop);
  loop_arg = PHI_ARG_DEF_FROM_EDGE (iv_phi, latch_e);

  access_fn = analyze_scalar_evolution (iv_loop, PHI_RESULT (iv_phi));
  gcc_assert (access_fn);
  ok = vect_is_simple_iv_evolution (iv_loop->num, access_fn,
                                    &init_expr, &step_expr);
  gcc_assert (ok);
  pe = loop_preheader_edge (iv_loop);

  /* Create the vector that holds the initial_value of the induction.  */
  if (nested_in_vect_loop)
    {
      /* iv_loop is nested in the loop to be vectorized.  init_expr had
         already been created during vectorization of previous stmts; we
         obtain it from the STMT_VINFO_VEC_STMT of the defining stmt.  */
      tree iv_def = PHI_ARG_DEF_FROM_EDGE (iv_phi,
                                           loop_preheader_edge (iv_loop));
      vec_init = vect_get_vec_def_for_operand (iv_def, iv_phi, NULL);
    }
  else
    {
      /* iv_loop is the loop to be vectorized.  Create:
         vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr)  */
      new_var = vect_get_new_vect_var (scalar_type, vect_scalar_var, "var_");
      add_referenced_var (new_var);

      new_name = force_gimple_operand (init_expr, &stmts, false, new_var);
      if (stmts)
        {
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }

      t = NULL_TREE;
      t = tree_cons (NULL_TREE, init_expr, t);
      for (i = 1; i < nunits; i++)
        {
          /* Create: new_name_i = new_name + step_expr  */
          enum tree_code code = POINTER_TYPE_P (scalar_type)
                                ? POINTER_PLUS_EXPR : PLUS_EXPR;
          init_stmt = gimple_build_assign_with_ops (code, new_var,
                                                    new_name, step_expr);
          new_name = make_ssa_name (new_var, init_stmt);
          gimple_assign_set_lhs (init_stmt, new_name);

          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "created new init_stmt: ");
              print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
            }
          t = tree_cons (NULL_TREE, new_name, t);
        }
      /* Create a vector from
         [new_name_0, new_name_1, ..., new_name_nunits-1].  */
      vec = build_constructor_from_list (vectype, nreverse (t));
      vec_init = vect_init_vector (iv_phi, vec, vectype, NULL);
    }

  /* Create the vector that holds the step of the induction.  */
  if (nested_in_vect_loop)
    /* iv_loop is nested in the loop to be vectorized.  Generate:
       vec_step = [S, S, S, S]  */
    new_name = step_expr;
  else
    {
      /* iv_loop is the loop to be vectorized.  Generate:
         vec_step = [VF*S, VF*S, VF*S, VF*S]  */
      expr = build_int_cst (TREE_TYPE (step_expr), vf);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
    }

  t = NULL_TREE;
  for (i = 0; i < nunits; i++)
    t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
  gcc_assert (CONSTANT_CLASS_P (new_name));
  stepvectype = get_vectype_for_scalar_type (TREE_TYPE (new_name));
  gcc_assert (stepvectype);
  vec = build_vector (stepvectype, t);
  vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);

  /* Create the following def-use cycle:
     loop prolog:
         vec_init = ...
         vec_step = ...
     loop:
         vec_iv = PHI <vec_init, vec_loop>
         ...
         STMT
         ...
         vec_loop = vec_iv + vec_step;  */

  /* Create the induction-phi that defines the induction-operand.  */
  vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
  add_referenced_var (vec_dest);
  induction_phi = create_phi_node (vec_dest, iv_loop->header);
  set_vinfo_for_stmt (induction_phi,
                      new_stmt_vec_info (induction_phi, loop_vinfo, NULL));
  induc_def = PHI_RESULT (induction_phi);

  /* Create the iv update inside the loop  */
  new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                           induc_def, vec_step);
  vec_def = make_ssa_name (vec_dest, new_stmt);
  gimple_assign_set_lhs (new_stmt, vec_def);
  gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
  set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo,
                                                   NULL));

  /* Set the arguments of the phi node:  */
  add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
  add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
               UNKNOWN_LOCATION);

  /* In case that vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation
     in vectorizable_operation.  */

  if (ncopies > 1)
    {
      stmt_vec_info prev_stmt_vinfo;
      /* FORNOW. This restriction should be relaxed.  */
      gcc_assert (!nested_in_vect_loop);

      /* Create the vector that holds the step of the induction.  */
      expr = build_int_cst (TREE_TYPE (step_expr), nunits);
      new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                              expr, step_expr);
      t = NULL_TREE;
      for (i = 0; i < nunits; i++)
        t = tree_cons (NULL_TREE, unshare_expr (new_name), t);
      gcc_assert (CONSTANT_CLASS_P (new_name));
      vec = build_vector (stepvectype, t);
      vec_step = vect_init_vector (iv_phi, vec, stepvectype, NULL);

      vec_def = induc_def;
      prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
      for (i = 1; i < ncopies; i++)
        {
          /* vec_i = vec_prev + vec_step  */
          new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, vec_dest,
                                                   vec_def, vec_step);
          vec_def = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, vec_def);

          gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
          set_vinfo_for_stmt (new_stmt,
                              new_stmt_vec_info (new_stmt, loop_vinfo, NULL));
          STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
          prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
        }
    }

  if (nested_in_vect_loop)
    {
      /* Find the loop-closed exit-phi of the induction, and record
         the final vector of induction results:  */
      exit_phi = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
        {
          if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              break;
            }
        }
      if (exit_phi)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
          /* FORNOW. Currently not supporting the case that an inner-loop
             induction is not used in the outer-loop (i.e. only outside the
             outer-loop).  */
          gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
                      && !STMT_VINFO_LIVE_P (stmt_vinfo));

          STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "vector of inductions after inner-loop:");
              print_gimple_stmt (vect_dump, new_stmt, 0, TDF_SLIM);
            }
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "transform induction: created def-use cycle: ");
      print_gimple_stmt (vect_dump, induction_phi, 0, TDF_SLIM);
      fprintf (vect_dump, "\n");
      print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (vec_def), 0, TDF_SLIM);
    }

  STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
  return induc_def;
}
/* Function get_initial_def_for_reduction

   Input:
   STMT - a stmt that performs a reduction operation in the loop.
   INIT_VAL - the initial value of the reduction variable

   Output:
   ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
        of the reduction (used for adjusting the epilog - see below).
   Return a vector variable, initialized according to the operation that STMT
        performs.  This vector will be used as the initial value of the
        vector of partial results.

   Option1 (adjust in epilog): Initialize the vector as follows:
     add/bit or/xor:    [0,0,...,0,0]
     mult/bit and:      [1,1,...,1,1]
     min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
   and when necessary (e.g. add/mult case) let the caller know
   that it needs to adjust the result by init_val.

   Option2: Initialize the vector as follows:
     add/bit or/xor:    [init_val,0,0,...,0]
     mult/bit and:      [init_val,1,1,...,1]
     min/max/cond_expr: [init_val,init_val,...,init_val]
   and no adjustments are needed.

   For example, for the following code:

   s = init_val;
   for (i=0;i<n;i++)
     s = s + a[i];

   STMT is 's = s + a[i]', and the reduction variable is 's'.
   For a vector of 4 units, we want to return either [0,0,0,init_val],
   or [0,0,0,0] and let the caller know that it needs to adjust
   the result at the end by 'init_val'.

   FORNOW, we are using the 'adjust in epilog' scheme, because this way the
   initialization vector is simpler (same element in all entries), if
   ADJUSTMENT_DEF is not NULL, and Option2 otherwise.

   A cost model should help decide between these two schemes.  */
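
/* Concretely (illustrative): for the sum above with init_val = 5 and a
   4-unit vector, Option1 returns [0,0,0,0] and sets ADJUSTMENT_DEF to 5,
   while Option2 returns [5,0,0,0]; for a product the neutral element '1'
   replaces '0'.  */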
static tree
get_initial_def_for_reduction (gimple stmt, tree init_val,
                               tree *adjustment_def)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree scalar_type = TREE_TYPE (init_val);
  tree vectype = get_vectype_for_scalar_type (scalar_type);
  int nunits;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree def_for_init;
  tree init_def;
  tree t = NULL_TREE;
  int i;
  bool nested_in_vect_loop = false;
  tree init_value;
  REAL_VALUE_TYPE real_init_val = dconst0;
  int int_init_val = 0;
  gimple def_stmt = NULL;

  gcc_assert (vectype);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
              || SCALAR_FLOAT_TYPE_P (scalar_type));

  if (nested_in_vect_loop_p (loop, stmt))
    nested_in_vect_loop = true;
  else
    gcc_assert (loop == (gimple_bb (stmt))->loop_father);

  /* In case of double reduction we only create a vector variable to be put
     in the reduction phi node.  The actual statement creation is done in
     vect_create_epilog_for_reduction.  */
  if (adjustment_def && nested_in_vect_loop
      && TREE_CODE (init_val) == SSA_NAME
      && (def_stmt = SSA_NAME_DEF_STMT (init_val))
      && gimple_code (def_stmt) == GIMPLE_PHI
      && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
      && vinfo_for_stmt (def_stmt)
      && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
         == vect_double_reduction_def)
    {
      *adjustment_def = NULL;
      return vect_create_destination_var (init_val, vectype);
    }

  if (TREE_CONSTANT (init_val))
    {
      if (SCALAR_FLOAT_TYPE_P (scalar_type))
        init_value = build_real (scalar_type, TREE_REAL_CST (init_val));
      else
        init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val));
    }
  else
    init_value = init_val;

  switch (code)
    {
    case WIDEN_SUM_EXPR:
    case DOT_PROD_EXPR:
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case MULT_EXPR:
    case BIT_AND_EXPR:
      /* ADJUSTMENT_DEF is NULL when called from
         vect_create_epilog_for_reduction to vectorize double reduction.  */
      if (adjustment_def)
        {
          if (nested_in_vect_loop)
            *adjustment_def = vect_get_vec_def_for_operand (init_val, stmt,
                                                            NULL);
          else
            *adjustment_def = init_val;
        }

      if (code == MULT_EXPR || code == BIT_AND_EXPR)
        {
          real_init_val = dconst1;
          int_init_val = 1;
        }

      if (SCALAR_FLOAT_TYPE_P (scalar_type))
        def_for_init = build_real (scalar_type, real_init_val);
      else
        def_for_init = build_int_cst (scalar_type, int_init_val);

      /* Create a vector of '0' or '1' except the first element.  */
      for (i = nunits - 2; i >= 0; --i)
        t = tree_cons (NULL_TREE, def_for_init, t);

      if (adjustment_def)
        {
          /* Option1: the first element is '0' or '1' as well.  */
          t = tree_cons (NULL_TREE, def_for_init, t);
          init_def = build_vector (vectype, t);
          break;
        }

      /* Option2: the first element is INIT_VAL.  */
      t = tree_cons (NULL_TREE, init_value, t);
      if (TREE_CONSTANT (init_val))
        init_def = build_vector (vectype, t);
      else
        init_def = build_constructor_from_list (vectype, t);

      break;

    case MIN_EXPR:
    case MAX_EXPR:
    case COND_EXPR:
      if (adjustment_def)
        {
          *adjustment_def = NULL_TREE;
          init_def = vect_get_vec_def_for_operand (init_val, stmt, NULL);
          break;
        }

      for (i = nunits - 1; i >= 0; --i)
        t = tree_cons (NULL_TREE, init_value, t);

      if (TREE_CONSTANT (init_val))
        init_def = build_vector (vectype, t);
      else
        init_def = build_constructor_from_list (vectype, t);

      break;

    default:
      gcc_unreachable ();
    }

  return init_def;
}
/* Function vect_create_epilog_for_reduction

   Create code at the loop-epilog to finalize the result of a reduction
   computation.

   Input:
   VECT_DEF is a vector of partial results.
   REDUC_CODE is the tree-code for the epilog reduction.
   NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
     number of elements that we can fit in a vectype (nunits).  In this case
     we have to generate more than one vector stmt - i.e - we need to "unroll"
     the vector stmt by a factor VF/nunits.  For more details see
     documentation in vectorizable_operation.
   STMT is the scalar reduction stmt that is being vectorized.
   REDUCTION_PHI is the phi-node that carries the reduction computation.
   REDUC_INDEX is the index of the operand in the right hand side of the
     statement that is defined by REDUCTION_PHI.
   DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.

   This function:
   1. Creates the reduction def-use cycle: sets the arguments for
      REDUCTION_PHI:
      The loop-entry argument is the vectorized initial-value of the
      reduction.  The loop-latch argument is VECT_DEF - the vector of
      partial sums.
   2. "Reduces" the vector of partial results VECT_DEF into a single result,
      by applying the operation specified by REDUC_CODE if available, or by
      other means (whole-vector shifts or a scalar loop).
      The function also creates a new phi node at the loop exit to preserve
      loop-closed form, as illustrated below.

     The flow at the entry to this function:

        loop:
          vec_def = phi <null, null>            # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          use <s_out0>
          use <s_out0>

     The above is transformed by this function into:

        loop:
          vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
          VECT_DEF = vector_stmt                # vectorized form of STMT
          s_loop = scalar_stmt                  # (scalar) STMT
        loop_exit:
          s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>
          s_out3 = extract_field <v_out2, 0>
          s_out4 = adjust_result <s_out3>
          use <s_out4>
          use <s_out4>  */
static void
vect_create_epilog_for_reduction (tree vect_def, gimple stmt,
                                  int ncopies,
                                  enum tree_code reduc_code,
                                  gimple reduction_phi,
                                  int reduc_index, bool double_reduc)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_phi_info;
  tree vectype;
  enum machine_mode mode;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
  basic_block exit_bb;
  tree scalar_dest;
  tree scalar_type;
  gimple new_phi = NULL, phi;
  gimple_stmt_iterator exit_gsi;
  tree vec_dest;
  tree new_temp = NULL_TREE;
  tree new_name;
  gimple epilog_stmt = NULL;
  tree new_scalar_dest, new_dest;
  gimple exit_phi;
  tree bitsize, bitpos;
  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree adjustment_def;
  tree vec_initial_def, def;
  tree orig_name;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  bool extract_scalar_result = false;
  tree reduction_op, expr;
  gimple orig_stmt;
  gimple use_stmt;
  bool nested_in_vect_loop = false;
  VEC(gimple,heap) *phis = NULL;
  enum vect_def_type dt = vect_unknown_def_type;
  int i, j;

  if (nested_in_vect_loop_p (loop, stmt))
    {
      outer_loop = loop;
      loop = loop->inner;
      nested_in_vect_loop = true;
    }

  switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
    {
    case GIMPLE_SINGLE_RHS:
      gcc_assert (TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt))
                  == ternary_op);
      reduction_op = TREE_OPERAND (gimple_assign_rhs1 (stmt), reduc_index);
      break;

    case GIMPLE_UNARY_RHS:
      reduction_op = gimple_assign_rhs1 (stmt);
      break;

    case GIMPLE_BINARY_RHS:
      reduction_op = reduc_index ?
                     gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
      break;

    default:
      gcc_unreachable ();
    }

  vectype = get_vectype_for_scalar_type (TREE_TYPE (reduction_op));
  gcc_assert (vectype);
  mode = TYPE_MODE (vectype);

  /*** 1. Create the reduction def-use cycle  ***/

  /* For the case of reduction, vect_get_vec_def_for_operand returns
     the scalar def before the loop, that defines the initial value
     of the reduction variable.  */
  vec_initial_def = vect_get_vec_def_for_operand (reduction_op, stmt,
                                                  &adjustment_def);

  phi = reduction_phi;
  def = vect_def;
  for (j = 0; j < ncopies; j++)
    {
      /* 1.1 set the loop-entry arg of the reduction-phi:  */
      add_phi_arg (phi, vec_initial_def, loop_preheader_edge (loop),
                   UNKNOWN_LOCATION);

      /* 1.2 set the loop-latch arg for the reduction-phi:  */
      if (j > 0)
        def = vect_get_vec_def_for_stmt_copy (dt, def);
      add_phi_arg (phi, def, loop_latch_edge (loop), UNKNOWN_LOCATION);

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "transform reduction: created def-use cycle: ");
          print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
          fprintf (vect_dump, "\n");
          print_gimple_stmt (vect_dump, SSA_NAME_DEF_STMT (def), 0, TDF_SLIM);
        }

      phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
    }

  /*** 2. Create epilog code
          The reduction epilog code operates across the elements of the vector
          of partial results computed by the vectorized loop.
          The reduction epilog code consists of:
          step 1: compute the scalar result in a vector (v_out2)
          step 2: extract the scalar result (s_out3) from the vector (v_out2)
          step 3: adjust the scalar result (s_out3) if needed.

          Step 1 can be accomplished using one of the following three schemes:
          (scheme 1) using reduc_code, if available.
          (scheme 2) using whole-vector shifts, if available.
          (scheme 3) using a scalar loop.  In this case steps 1+2 above are
                     combined.

          The overall epilog code looks like this:

          s_out0 = phi <s_loop>                 # original EXIT_PHI
          v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
          v_out2 = reduce <v_out1>              # step 1
          s_out3 = extract_field <v_out2, 0>    # step 2
          s_out4 = adjust_result <s_out3>       # step 3

          (step 3 is optional, and steps 1 and 2 may be combined).
          Lastly, the uses of s_out0 are replaced by s_out4.  */
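
  /* To illustrate scheme 2 (illustrative lanes): a 4-element sum
     {a0,a1,a2,a3} takes log2(4) = 2 shift+op rounds:
         shift VS/2:  {a2,a3,_,_}      op: {a0+a2, a1+a3, _, _}
         shift VS/4:  {a1+a3,_,_,_}    op: {a0+a1+a2+a3, _, _, _}
     after which lane 0 holds the scalar result and is extracted in
     step 2.  */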
  /* 2.1 Create new loop-exit-phi to preserve loop-closed form:
         v_out1 = phi <v_loop>  */

  exit_bb = single_exit (loop)->dest;
  def = vect_def;
  prev_phi_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      phi = create_phi_node (SSA_NAME_VAR (vect_def), exit_bb);
      set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo, NULL));
      if (j == 0)
        new_phi = phi;
      else
        {
          def = vect_get_vec_def_for_stmt_copy (dt, def);
          STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
        }
      SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
      prev_phi_info = vinfo_for_stmt (phi);
    }

  exit_gsi = gsi_after_labels (exit_bb);

  /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
         (i.e. when reduc_code is not available) and in the final adjustment
         code (if needed).  Also get the original scalar reduction variable as
         defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
         represents a reduction pattern), the tree-code and scalar-def are
         taken from the original stmt that the pattern-stmt (STMT) replaces.
         Otherwise (it is a regular reduction) - the tree-code and scalar-def
         are taken from STMT.  */

  orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!orig_stmt)
    {
      /* Regular reduction  */
      orig_stmt = stmt;
    }
  else
    {
      /* Reduction pattern  */
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
      gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
    }

  code = gimple_assign_rhs_code (orig_stmt);
  scalar_dest = gimple_assign_lhs (orig_stmt);
  scalar_type = TREE_TYPE (scalar_dest);
  new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
  bitsize = TYPE_SIZE (scalar_type);

  /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
     partial results are added and not subtracted.  */
  if (code == MINUS_EXPR)
    code = PLUS_EXPR;

  /* In case this is a reduction in an inner-loop while vectorizing an outer
     loop - we don't need to extract a single scalar result at the end of the
     inner-loop (unless it is double reduction, i.e., the use of reduction is
     outside the outer-loop).  The final vector of partial results will be
     used in the vectorized outer-loop, or reduced to a scalar result at the
     end of the outer-loop.  */
  if (nested_in_vect_loop && !double_reduc)
    goto vect_finalize_reduction;

  /* The epilogue is created for the outer-loop, i.e., for the loop being
     vectorized.  */
  if (double_reduc)
    loop = outer_loop;

  /* FORNOW */
  gcc_assert (ncopies == 1);

  /* 2.3 Create the reduction code, using one of the three schemes described
         above.  */

  if (reduc_code != ERROR_MARK)
    {
      tree tmp;

      /*** Case 1:  Create:
           v_out2 = reduc_expr <v_out1>  */

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Reduce using direct vector reduction.");

      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      tmp = build1 (reduc_code, vectype, PHI_RESULT (new_phi));
      epilog_stmt = gimple_build_assign (vec_dest, tmp);
      new_temp = make_ssa_name (vec_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

      extract_scalar_result = true;
    }
  else
    {
      enum tree_code shift_code = ERROR_MARK;
      bool have_whole_vector_shift = true;
      int bit_offset;
      int element_bitsize = tree_low_cst (bitsize, 1);
      int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
      tree vec_temp;

      if (optab_handler (vec_shr_optab, mode)->insn_code != CODE_FOR_nothing)
        shift_code = VEC_RSHIFT_EXPR;
      else
        have_whole_vector_shift = false;

      /* Regardless of whether we have a whole vector shift, if we're
         emulating the operation via tree-vect-generic, we don't want
         to use it.  Only the first round of the reduction is likely
         to still be profitable via emulation.  */
      /* ??? It might be better to emit a reduction tree code here, so that
         tree-vect-generic can expand the first round via bit tricks.  */
      if (!VECTOR_MODE_P (mode))
        have_whole_vector_shift = false;
      else
        {
          optab optab = optab_for_tree_code (code, vectype, optab_default);
          if (optab_handler (optab, mode)->insn_code == CODE_FOR_nothing)
            have_whole_vector_shift = false;
        }

      if (have_whole_vector_shift)
        {
          /*** Case 2: Create:
             for (offset = VS/2; offset >= element_size; offset/=2)
               {
                 Create:  va' = vec_shift <va, offset>
                 Create:  va = vop <va, va'>
               }  */

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Reduce using vector shifts");

          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_temp = PHI_RESULT (new_phi);

          for (bit_offset = vec_size_in_bits/2;
               bit_offset >= element_bitsize;
               bit_offset /= 2)
            {
              tree bitpos = size_int (bit_offset);

              epilog_stmt = gimple_build_assign_with_ops (shift_code,
                                                          vec_dest,
                                                          new_temp, bitpos);
              new_name = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign_with_ops (code, vec_dest,
                                                          new_name, new_temp);
              new_temp = make_ssa_name (vec_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          extract_scalar_result = true;
        }
      else
        {
          tree rhs;

          /*** Case 3: Create:
             s = extract_field <v_out2, 0>
             for (offset = element_size;
                  offset < vector_size;
                  offset += element_size;)
               {
                 Create:  s' = extract_field <v_out2, offset>
                 Create:  s = op <s, s'>
               }  */

          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "Reduce using scalar code.");

          vec_temp = PHI_RESULT (new_phi);
          vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
          rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
                        bitsize_zero_node);
          epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
          new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
          gimple_assign_set_lhs (epilog_stmt, new_temp);
          gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

          for (bit_offset = element_bitsize;
               bit_offset < vec_size_in_bits;
               bit_offset += element_bitsize)
            {
              tree bitpos = bitsize_int (bit_offset);
              tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
                                 bitsize, bitpos);

              epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
              new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_name);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);

              epilog_stmt = gimple_build_assign_with_ops (code,
                                                          new_scalar_dest,
                                                          new_name, new_temp);
              new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
              gimple_assign_set_lhs (epilog_stmt, new_temp);
              gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
            }

          extract_scalar_result = false;
        }
    }

  /* 2.4  Extract the final scalar result.  Create:
          s_out3 = extract_field <v_out2, bitpos>  */

  if (extract_scalar_result)
    {
      tree rhs;

      gcc_assert (!nested_in_vect_loop || double_reduc);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "extract scalar result");

      if (BYTES_BIG_ENDIAN)
        bitpos = size_binop (MULT_EXPR,
                             bitsize_int (TYPE_VECTOR_SUBPARTS (vectype) - 1),
                             TYPE_SIZE (scalar_type));
      else
        bitpos = bitsize_zero_node;

      rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitpos);
      epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
      new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    }

vect_finalize_reduction:

  if (double_reduc)
    loop = loop->inner;

  /* 2.5 Adjust the final result by the initial value of the reduction
         variable.  (When such adjustment is not needed, then
         'adjustment_def' is zero).  For example, if code is PLUS we create:
         new_temp = loop_exit_def + adjustment_def  */

  if (adjustment_def)
    {
      if (nested_in_vect_loop)
        {
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
          expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, vectype);
        }
      else
        {
          gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
          expr = build2 (code, scalar_type, new_temp, adjustment_def);
          new_dest = vect_create_destination_var (scalar_dest, scalar_type);
        }

      epilog_stmt = gimple_build_assign (new_dest, expr);
      new_temp = make_ssa_name (new_dest, epilog_stmt);
      gimple_assign_set_lhs (epilog_stmt, new_temp);
      SSA_NAME_DEF_STMT (new_temp) = epilog_stmt;
      gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    }

  /* 2.6  Handle the loop-exit phi  */

  /* Replace uses of s_out0 with uses of s_out3:
     Find the loop-closed-use at the loop exit of the original scalar result.
     (The reduction result is expected to have two immediate uses - one at the
     latch block, and one at the loop exit).  */
  phis = VEC_alloc (gimple, heap, 10);
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
    {
      if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
        {
          exit_phi = USE_STMT (use_p);
          VEC_quick_push (gimple, phis, exit_phi);
        }
    }

  /* We expect to have found an exit_phi because of loop-closed-ssa form.  */
  gcc_assert (!VEC_empty (gimple, phis));

  for (i = 0; VEC_iterate (gimple, phis, i, exit_phi); i++)
    {
      if (nested_in_vect_loop)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
          gimple vect_phi;

          /* FORNOW. Currently not supporting the case that an inner-loop
             reduction is not used in the outer-loop (but only outside the
             outer-loop), unless it is double reduction.  */
          gcc_assert ((STMT_VINFO_RELEVANT_P (stmt_vinfo)
                       && !STMT_VINFO_LIVE_P (stmt_vinfo)) || double_reduc);

          epilog_stmt = adjustment_def ? epilog_stmt : new_phi;
          STMT_VINFO_VEC_STMT (stmt_vinfo) = epilog_stmt;
          set_vinfo_for_stmt (epilog_stmt,
                              new_stmt_vec_info (epilog_stmt, loop_vinfo,
                                                 NULL));
          if (adjustment_def)
            STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
              STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));

          if (!double_reduc
              || STMT_VINFO_DEF_TYPE (stmt_vinfo)
                 != vect_double_reduction_def)
            continue;

          /* Handle double reduction:

             stmt1: s1 = phi <s0, s2>  - double reduction phi (outer loop)
             stmt2: s3 = phi <s1, s4>  - (regular) reduction phi (inner loop)
             stmt3: s4 = use (s3)      - (regular) reduction stmt (inner loop)
             stmt4: s2 = phi <s4>      - double reduction stmt (outer loop)

             At that point the regular reduction (stmt2 and stmt3) is already
             vectorized, as well as the exit phi node, stmt4.
             Here we vectorize the phi node of double reduction, stmt1, and
             update all relevant statements.  */

          /* Go through all the uses of s2 to find double reduction phi node,
             i.e., stmt1 above.  */
          orig_name = PHI_RESULT (exit_phi);
          FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
            {
              stmt_vec_info use_stmt_vinfo = vinfo_for_stmt (use_stmt);
              stmt_vec_info new_phi_vinfo;
              tree vect_phi_init, preheader_arg, vect_phi_res, init_def;
              basic_block bb = gimple_bb (use_stmt);
              gimple use;

              /* Check that USE_STMT is really double reduction phi node.  */
              if (gimple_code (use_stmt) != GIMPLE_PHI
                  || gimple_phi_num_args (use_stmt) != 2
                  || !use_stmt_vinfo
                  || STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
                     != vect_double_reduction_def
                  || bb->loop_father != outer_loop)
                continue;

              /* Create vector phi node for double reduction:
                 vs1 = phi <vs0, vs2>
                 vs1 was created previously in this function by a call to
                 vect_get_vec_def_for_operand and is stored in
                 vec_initial_def;
                 vs2 is defined by EPILOG_STMT, the vectorized EXIT_PHI;
                 vs0 is created here.  */

              /* Create vector phi node.  */
              vect_phi = create_phi_node (vec_initial_def, bb);
              new_phi_vinfo = new_stmt_vec_info (vect_phi,
                                loop_vec_info_for_loop (outer_loop), NULL);
              set_vinfo_for_stmt (vect_phi, new_phi_vinfo);

              /* Create vs0 - initial def of the double reduction phi.  */
              preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
                                loop_preheader_edge (outer_loop));
              init_def = get_initial_def_for_reduction (stmt, preheader_arg,
                                                        NULL);
              vect_phi_init = vect_init_vector (use_stmt, init_def, vectype,
                                                NULL);

              /* Update phi node arguments with vs0 and vs2.  */
              add_phi_arg (vect_phi, vect_phi_init,
                           loop_preheader_edge (outer_loop),
                           UNKNOWN_LOCATION);
              add_phi_arg (vect_phi, PHI_RESULT (epilog_stmt),
                           loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
              if (vect_print_dump_info (REPORT_DETAILS))
                {
                  fprintf (vect_dump, "created double reduction phi node: ");
                  print_gimple_stmt (vect_dump, vect_phi, 0, TDF_SLIM);
                }

              vect_phi_res = PHI_RESULT (vect_phi);

              /* Replace the use, i.e., set the correct vs1 in the regular
                 reduction phi node.  FORNOW, NCOPIES is always 1, so the
                 loop is redundant.  */
              use = reduction_phi;
              for (j = 0; j < ncopies; j++)
                {
                  edge pr_edge = loop_preheader_edge (loop);
                  SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
                  use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
                }
            }
        }

      /* Replace the uses:  */
      orig_name = PHI_RESULT (exit_phi);
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
        FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
          SET_USE (use_p, new_temp);
    }

  VEC_free (gimple, heap, phis);
}
/* Function vectorizable_reduction.

   Check if STMT performs a reduction operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.

   This function also handles reduction idioms (patterns) that have been
   recognized in advance during vect_pattern_recog.  In this case, STMT may
   be a stmt of the form:
     X = pattern_expr (arg0, arg1, ..., X)
   and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
   sequence that had been detected and replaced by the pattern-stmt (STMT).

   In some cases of reduction patterns, the type of the reduction variable X
   is different than the type of the other arguments of STMT.
   In such cases, the vectype that is used when transforming STMT into a
   vector stmt is different than the vectype that is used to determine the
   vectorization factor, because it consists of a different number of
   elements than the actual number of elements that are being operated upon
   in parallel.

   For example, consider an accumulation of shorts into an int accumulator.
   On some targets it's possible to vectorize this pattern operating on 8
   shorts at a time (hence, the vectype for purposes of determining the
   vectorization factor should be V8HI); on the other hand, the vectype that
   is used to create the vector form is actually V4SI (the type of the
   result).

   Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
   indicates what is the actual level of parallelism (V8HI in the example),
   so that the right vectorization factor would be derived.  This vectype
   corresponds to the type of arguments to the reduction stmt, and should
   *NOT* be used to create the vectorized stmt.  The right vectype for the
   vectorized stmt is obtained from the type of the result X:
      get_vectype_for_scalar_type (TREE_TYPE (X))

   This means that, contrary to "regular" reductions (or "regular" stmts in
   general), the following equation:
      STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
   does *NOT* necessarily hold for reduction patterns.  */
3439 vectorizable_reduction (gimple stmt
, gimple_stmt_iterator
*gsi
,
3444 tree loop_vec_def0
= NULL_TREE
, loop_vec_def1
= NULL_TREE
;
3445 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3446 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3447 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3448 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3449 enum tree_code code
, orig_code
, epilog_reduc_code
;
3450 enum machine_mode vec_mode
;
3452 optab optab
, reduc_optab
;
3453 tree new_temp
= NULL_TREE
;
3456 enum vect_def_type dt
;
3457 gimple new_phi
= NULL
;
3461 stmt_vec_info orig_stmt_info
;
3462 tree expr
= NULL_TREE
;
3464 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
3465 int ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
3467 stmt_vec_info prev_stmt_info
, prev_phi_info
;
3468 gimple first_phi
= NULL
;
3469 bool single_defuse_cycle
= false;
3470 tree reduc_def
= NULL_TREE
;
3471 gimple new_stmt
= NULL
;
3474 bool nested_cycle
= false, found_nested_cycle_def
= false;
3475 gimple reduc_def_stmt
= NULL
;
3476 /* The default is that the reduction variable is the last in statement. */
3477 int reduc_index
= 2;
3478 bool double_reduc
= false, dummy
;
3480 struct loop
* def_stmt_loop
, *outer_loop
= NULL
;
3482 gimple def_arg_stmt
;
3484 if (nested_in_vect_loop_p (loop
, stmt
))
3488 nested_cycle
= true;
3491 gcc_assert (ncopies
>= 1);
3493 /* FORNOW: SLP not supported. */
3494 if (STMT_SLP_TYPE (stmt_info
))
3497 /* 1. Is vectorizable reduction? */
3498 /* Not supportable if the reduction variable is used in the loop. */
3499 if (STMT_VINFO_RELEVANT (stmt_info
) > vect_used_in_outer
)
3502 /* Reductions that are not used even in an enclosing outer-loop,
3503 are expected to be "live" (used out of the loop). */
3504 if (STMT_VINFO_RELEVANT (stmt_info
) == vect_unused_in_scope
3505 && !STMT_VINFO_LIVE_P (stmt_info
))
3508 /* Make sure it was already recognized as a reduction computation. */
3509 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_reduction_def
3510 && STMT_VINFO_DEF_TYPE (stmt_info
) != vect_nested_cycle
)
3513 /* 2. Has this been recognized as a reduction pattern?
3515 Check if STMT represents a pattern that has been recognized
3516 in earlier analysis stages. For stmts that represent a pattern,
3517 the STMT_VINFO_RELATED_STMT field records the last stmt in
3518 the original sequence that constitutes the pattern. */
3520 orig_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
3523 orig_stmt_info
= vinfo_for_stmt (orig_stmt
);
3524 gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info
) == stmt
);
3525 gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info
));
3526 gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info
));
3529 /* 3. Check the operands of the operation. The first operands are defined
3530 inside the loop body. The last operand is the reduction variable,
3531 which is defined by the loop-header-phi. */
3533 gcc_assert (is_gimple_assign (stmt
));
3536 switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt
)))
3538 case GIMPLE_SINGLE_RHS
:
3539 op_type
= TREE_OPERAND_LENGTH (gimple_assign_rhs1 (stmt
));
3540 if (op_type
== ternary_op
)
3542 tree rhs
= gimple_assign_rhs1 (stmt
);
3543 ops
[0] = TREE_OPERAND (rhs
, 0);
3544 ops
[1] = TREE_OPERAND (rhs
, 1);
3545 ops
[2] = TREE_OPERAND (rhs
, 2);
3546 code
= TREE_CODE (rhs
);
3552 case GIMPLE_BINARY_RHS
:
3553 code
= gimple_assign_rhs_code (stmt
);
3554 op_type
= TREE_CODE_LENGTH (code
);
3555 gcc_assert (op_type
== binary_op
);
3556 ops
[0] = gimple_assign_rhs1 (stmt
);
3557 ops
[1] = gimple_assign_rhs2 (stmt
);
3560 case GIMPLE_UNARY_RHS
:
3567 scalar_dest
= gimple_assign_lhs (stmt
);
3568 scalar_type
= TREE_TYPE (scalar_dest
);
3569 if (!POINTER_TYPE_P (scalar_type
) && !INTEGRAL_TYPE_P (scalar_type
)
3570 && !SCALAR_FLOAT_TYPE_P (scalar_type
))
3573 /* All uses but the last are expected to be defined in the loop.
3574 The last use is the reduction variable. In case of nested cycle this
3575 assumption is not true: we use reduc_index to record the index of the
3576 reduction variable. */
3577 for (i
= 0; i
< op_type
-1; i
++)
3579 /* The condition of COND_EXPR is checked in vectorizable_condition(). */
3580 if (i
== 0 && code
== COND_EXPR
)
3583 is_simple_use
= vect_is_simple_use (ops
[i
], loop_vinfo
, NULL
, &def_stmt
,
3585 gcc_assert (is_simple_use
);
3586 if (dt
!= vect_internal_def
3587 && dt
!= vect_external_def
3588 && dt
!= vect_constant_def
3589 && dt
!= vect_induction_def
3590 && !(dt
== vect_nested_cycle
&& nested_cycle
))
3593 if (dt
== vect_nested_cycle
)
3595 found_nested_cycle_def
= true;
3596 reduc_def_stmt
= def_stmt
;
3601 is_simple_use
= vect_is_simple_use (ops
[i
], loop_vinfo
, NULL
, &def_stmt
,
3603 gcc_assert (is_simple_use
);
3604 gcc_assert (dt
== vect_reduction_def
3605 || dt
== vect_nested_cycle
3606 || ((dt
== vect_internal_def
|| dt
== vect_external_def
3607 || dt
== vect_constant_def
|| dt
== vect_induction_def
)
3608 && nested_cycle
&& found_nested_cycle_def
));
3609 if (!found_nested_cycle_def
)
3610 reduc_def_stmt
= def_stmt
;
3612 gcc_assert (gimple_code (reduc_def_stmt
) == GIMPLE_PHI
);
3614 gcc_assert (orig_stmt
== vect_is_simple_reduction (loop_vinfo
,
3619 gcc_assert (stmt
== vect_is_simple_reduction (loop_vinfo
, reduc_def_stmt
,
3620 !nested_cycle
, &dummy
));
3622 if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt
)))
3625 vec_mode
= TYPE_MODE (vectype
);
3627 if (code
== COND_EXPR
)
3629 if (!vectorizable_condition (stmt
, gsi
, NULL
, ops
[reduc_index
], 0))
3631 if (vect_print_dump_info (REPORT_DETAILS
))
3632 fprintf (vect_dump
, "unsupported condition in reduction");
3639 /* 4. Supportable by target? */
3641 /* 4.1. check support for the operation in the loop */
3642 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
3645 if (vect_print_dump_info (REPORT_DETAILS
))
3646 fprintf (vect_dump
, "no optab.");
3651 if (optab_handler (optab
, vec_mode
)->insn_code
== CODE_FOR_nothing
)
3653 if (vect_print_dump_info (REPORT_DETAILS
))
3654 fprintf (vect_dump
, "op not supported by target.");
3656 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
3657 || LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
3658 < vect_min_worthwhile_factor (code
))
3661 if (vect_print_dump_info (REPORT_DETAILS
))
3662 fprintf (vect_dump
, "proceeding using word mode.");
3665 /* Worthwhile without SIMD support? */
3666 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
3667 && LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
3668 < vect_min_worthwhile_factor (code
))
3670 if (vect_print_dump_info (REPORT_DETAILS
))
3671 fprintf (vect_dump
, "not worthwhile without SIMD support.");
3677 /* 4.2. Check support for the epilog operation.
3679 If STMT represents a reduction pattern, then the type of the
3680 reduction variable may be different than the type of the rest
3681 of the arguments. For example, consider the case of accumulation
3682 of shorts into an int accumulator; The original code:
3683 S1: int_a = (int) short_a;
3684 orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
3687 STMT: int_acc = widen_sum <short_a, int_acc>
3690 1. The tree-code that is used to create the vector operation in the
3691 epilog code (that reduces the partial results) is not the
3692 tree-code of STMT, but is rather the tree-code of the original
3693 stmt from the pattern that STMT is replacing. I.e, in the example
3694 above we want to use 'widen_sum' in the loop, but 'plus' in the
3696 2. The type (mode) we use to check available target support
3697 for the vector operation to be created in the *epilog*, is
3698 determined by the type of the reduction variable (in the example
3699 above we'd check this: plus_optab[vect_int_mode]).
3700 However the type (mode) we use to check available target support
3701 for the vector operation to be created *inside the loop*, is
3702 determined by the type of the other arguments to STMT (in the
3703 example we'd check this: widen_sum_optab[vect_short_mode]).
3705 This is contrary to "regular" reductions, in which the types of all
3706 the arguments are the same as the type of the reduction variable.
3707 For "regular" reductions we can therefore use the same vector type
3708 (and also the same tree-code) when generating the epilog code and
3709 when generating the code inside the loop. */
3713 /* This is a reduction pattern: get the vectype from the type of the
3714 reduction variable, and get the tree-code from orig_stmt. */
3715 orig_code
= gimple_assign_rhs_code (orig_stmt
);
3716 vectype
= get_vectype_for_scalar_type (TREE_TYPE (def
));
3719 if (vect_print_dump_info (REPORT_DETAILS
))
3721 fprintf (vect_dump
, "unsupported data-type ");
3722 print_generic_expr (vect_dump
, TREE_TYPE (def
), TDF_SLIM
);
3727 vec_mode
= TYPE_MODE (vectype
);
3731 /* Regular reduction: use the same vectype and tree-code as used for
3732 the vector code inside the loop can be used for the epilog code. */
3738 def_bb
= gimple_bb (reduc_def_stmt
);
3739 def_stmt_loop
= def_bb
->loop_father
;
3740 def_arg
= PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt
,
3741 loop_preheader_edge (def_stmt_loop
));
3742 if (TREE_CODE (def_arg
) == SSA_NAME
3743 && (def_arg_stmt
= SSA_NAME_DEF_STMT (def_arg
))
3744 && gimple_code (def_arg_stmt
) == GIMPLE_PHI
3745 && flow_bb_inside_loop_p (outer_loop
, gimple_bb (def_arg_stmt
))
3746 && vinfo_for_stmt (def_arg_stmt
)
3747 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt
))
3748 == vect_double_reduction_def
)
3749 double_reduc
= true;
3752 epilog_reduc_code
= ERROR_MARK
;
3753 if (reduction_code_for_scalar_code (orig_code
, &epilog_reduc_code
))
3755 reduc_optab
= optab_for_tree_code (epilog_reduc_code
, vectype
,
3759 if (vect_print_dump_info (REPORT_DETAILS
))
3760 fprintf (vect_dump
, "no optab for reduction.");
3762 epilog_reduc_code
= ERROR_MARK
;
3766 && optab_handler (reduc_optab
, vec_mode
)->insn_code
3767 == CODE_FOR_nothing
)
3769 if (vect_print_dump_info (REPORT_DETAILS
))
3770 fprintf (vect_dump
, "reduc op not supported by target.");
3772 epilog_reduc_code
= ERROR_MARK
;
3777 if (!nested_cycle
|| double_reduc
)
3779 if (vect_print_dump_info (REPORT_DETAILS
))
3780 fprintf (vect_dump
, "no reduc code for scalar code.");
3786 if (double_reduc
&& ncopies
> 1)
3788 if (vect_print_dump_info (REPORT_DETAILS
))
3789 fprintf (vect_dump
, "multiple types in double reduction");
3794 if (!vec_stmt
) /* transformation not required. */
3796 STMT_VINFO_TYPE (stmt_info
) = reduc_vec_info_type
;
3797 if (!vect_model_reduction_cost (stmt_info
, epilog_reduc_code
, ncopies
))
3804 if (vect_print_dump_info (REPORT_DETAILS
))
3805 fprintf (vect_dump
, "transform reduction.");
3807 /* FORNOW: Multiple types are not supported for condition. */
3808 if (code
== COND_EXPR
)
3809 gcc_assert (ncopies
== 1);
3811 /* Create the destination vector */
3812 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
3814 /* In case the vectorization factor (VF) is bigger than the number
3815 of elements that we can fit in a vectype (nunits), we have to generate
3816 more than one vector stmt - i.e - we need to "unroll" the
3817 vector stmt by a factor VF/nunits. For more details see documentation
3818 in vectorizable_operation. */
  /* If the reduction is used in an outer loop we need to generate
     VF intermediate results, like so (e.g. for ncopies=2):
        r0 = phi (init, r0)
        r1 = phi (init, r1)
        r0 = x0 + r0;
        r1 = x1 + r1;
     (i.e. we generate VF results in 2 registers).
     In this case we have a separate def-use cycle for each copy, and therefore
     for each copy we get the vector def for the reduction variable from the
     respective phi node created for this copy.

     Otherwise (the reduction is unused in the loop nest), we can combine
     together intermediate results, like so (e.g. for ncopies=2):
        r = phi (init, r)
        r = x0 + r;
        r = x1 + r;
     (i.e. we generate VF/2 results in a single register).
     In this case for each copy we get the vector def for the reduction
     variable from the vectorized reduction operation generated in the
     previous iteration.  */
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope)
    {
      single_defuse_cycle = true;
      epilog_copies = 1;
    }
  else
    epilog_copies = ncopies;

  prev_stmt_info = NULL;
  prev_phi_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0 || !single_defuse_cycle)
        {
          /* Create the reduction-phi that defines the reduction-operand.  */
          new_phi = create_phi_node (vec_dest, loop->header);
          set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo,
                                                          NULL));
          /* Get the vector def for the reduction variable from the phi
             node.  */
          reduc_def = PHI_RESULT (new_phi);
        }
      if (code == COND_EXPR)
        {
          first_phi = new_phi;
          vectorizable_condition (stmt, gsi, vec_stmt, reduc_def, reduc_index);
          /* Multiple types are not supported for condition.  */
          break;
        }
      /* Handle uses.  */
      if (j == 0)
        {
          loop_vec_def0 = vect_get_vec_def_for_operand (ops[!reduc_index],
                                                        stmt, NULL);
          if (op_type == ternary_op)
            {
              if (reduc_index == 0)
                loop_vec_def1 = vect_get_vec_def_for_operand (ops[2], stmt,
                                                              NULL);
              else
                loop_vec_def1 = vect_get_vec_def_for_operand (ops[1], stmt,
                                                              NULL);
            }

          /* Get the vector def for the reduction variable from the phi
             node.  */
          first_phi = new_phi;
        }
      else
        {
          enum vect_def_type dt = vect_unknown_def_type; /* Dummy */
          loop_vec_def0 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def0);
          if (op_type == ternary_op)
            loop_vec_def1 = vect_get_vec_def_for_stmt_copy (dt, loop_vec_def1);

          if (single_defuse_cycle)
            reduc_def = gimple_assign_lhs (new_stmt);
          else
            reduc_def = PHI_RESULT (new_phi);

          STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
        }
      /* Arguments are ready.  Create the new vector stmt.  */
      if (op_type == binary_op)
        {
          if (reduc_index == 0)
            expr = build2 (code, vectype, reduc_def, loop_vec_def0);
          else
            expr = build2 (code, vectype, loop_vec_def0, reduc_def);
        }
      else
        {
          if (reduc_index == 0)
            expr = build3 (code, vectype, reduc_def, loop_vec_def0,
                           loop_vec_def1);
          else
            {
              if (reduc_index == 1)
                expr = build3 (code, vectype, loop_vec_def0, reduc_def,
                               loop_vec_def1);
              else
                expr = build3 (code, vectype, loop_vec_def0, loop_vec_def1,
                               reduc_def);
            }
        }

      new_stmt = gimple_build_assign (vec_dest, expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
      prev_phi_info = vinfo_for_stmt (new_phi);
    }
  /* Finalize the reduction-phi (set its arguments) and create the
     epilog reduction code.  */
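  /* For instance (an illustrative sketch, not code from this file): for a
     plus-reduction with a 4-element vector temp {s0, s1, s2, s3} left by
     the loop, the epilog typically emits something like

        tmp = REDUC_PLUS_EXPR <{s0, s1, s2, s3}>;
        sum = BIT_FIELD_REF <tmp, ...>;

     when the target provides a reduc optab, and otherwise falls back to a
     sequence of whole-vector shifts and adds (the epilog_reduc_code ==
     ERROR_MARK case set up above).  */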
  if (!single_defuse_cycle || code == COND_EXPR)
    new_temp = gimple_assign_lhs (*vec_stmt);

  vect_create_epilog_for_reduction (new_temp, stmt, epilog_copies,
                                    epilog_reduc_code, first_phi, reduc_index,
                                    double_reduc);

  return true;
}
/* Function vect_min_worthwhile_factor.

   For a loop where we could vectorize the operation indicated by CODE,
   return the minimum vectorization factor that makes it worthwhile
   to use generic vectors.  */
int
vect_min_worthwhile_factor (enum tree_code code)
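{
  /* A minimal sketch of the heuristic; the operation classes and the
     thresholds below are illustrative assumptions, not authoritative
     values: cheap arithmetic pays off only at a larger factor, bitwise
     ops already at a smaller one, and everything else never.  */
  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
      return 4;

    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
      return 2;

    default:
      return INT_MAX;
    }
}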
/* Function vectorizable_induction

   Check if PHI performs an induction computation that can be vectorized.
   If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
   phi to replace it, put it in VEC_STMT, and add it to the same basic block.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
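/* For example (an illustration, for VF=4 and scalar step 1): the induction

     i_1 = PHI <0, i_2>               vi_1 = PHI <{0,1,2,3}, vi_2>
     i_2 = i_1 + 1;          -->      vi_2 = vi_1 + {4,4,4,4};

   i.e. the initial vector def enumerates the first VF scalar values, and
   each vector copy advances by VF times the scalar step.  */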
bool
vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                        gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  tree vec_def;

  gcc_assert (ncopies >= 1);
  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop_p (loop, phi) && ncopies > 1)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);

  if (gimple_code (phi) != GIMPLE_PHI)
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_induction ===");
      vect_model_induction_cost (stmt_info, ncopies);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform induction phi.");

  vec_def = get_initial_def_for_induction (phi);
  *vec_stmt = SSA_NAME_DEF_STMT (vec_def);
  return true;
}
/* Function vectorizable_live_operation.

   STMT computes a value that is used outside the loop.  Check if
   it can be supported.  */
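/* For example (an illustration): in

     for (i = 0; i < N; i++)
       {
         ...
         k_1 = x_3 + c_5;   <-- x_3 and c_5 are defined outside the loop
       }
     ... = k_1;             <-- k_1 is "live": used after the loop

   the defining stmt uses only loop invariants, so it can stay in place,
   unvectorized, and the last scalar value it computes is the one used.  */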
bool
vectorizable_live_operation (gimple stmt,
                             gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
                             gimple *vec_stmt ATTRIBUTE_UNUSED)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int i;
  int op_type;
  tree op;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  enum tree_code code;
  enum gimple_rhs_class rhs_class;

  gcc_assert (STMT_VINFO_LIVE_P (stmt_info));

  if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  /* FORNOW. CHECKME.  */
  if (nested_in_vect_loop_p (loop, stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);
  op_type = TREE_CODE_LENGTH (code);
  rhs_class = get_gimple_rhs_class (code);
  gcc_assert (rhs_class != GIMPLE_UNARY_RHS || op_type == unary_op);
  gcc_assert (rhs_class != GIMPLE_BINARY_RHS || op_type == binary_op);

  /* FORNOW: support only if all uses are invariant.  This means
     that the scalar operations can remain in place, unvectorized.
     The original last scalar value that they compute will be used.  */

  for (i = 0; i < op_type; i++)
    {
      if (rhs_class == GIMPLE_SINGLE_RHS)
        op = TREE_OPERAND (gimple_op (stmt, 1), i);
      else
        op = gimple_op (stmt, i + 1);
      if (op
          && !vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }

  /* No transformation is required for the cases we currently support.  */
  return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT.  */
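/* E.g. (an illustration): if 'sum_5' is defined inside LOOP and a
   '# DEBUG s => sum_5' bind appears after the loop, the bind's value is
   reset, so the debugger sees the variable as unavailable rather than
   bound to a stale or removed definition.  */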
static void
vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
{
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  def_operand_p def_p;
  gimple ustmt;

  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb;

          if (!is_gimple_debug (ustmt))
            continue;

          bb = gimple_bb (ustmt);

          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (gimple_debug_bind_p (ustmt))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "killing debug use");

                  gimple_debug_bind_reset_value (ustmt);
                  update_stmt (ustmt);
                }
            }
        }
    }
}
/* Function vect_transform_loop.

   The analysis phase has determined that the loop is vectorizable.
   Vectorize the loop - create vectorized stmts to replace the scalar
   stmts in the loop, and update the loop exit condition.  */
void
vect_transform_loop (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  int i;
  tree ratio = NULL;
  int vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  bool strided_store;
  bool slp_scheduled = false;
  unsigned int nunits;
  tree cond_expr = NULL_TREE;
  gimple_seq cond_expr_stmt_list = NULL;
  bool do_peeling_for_loop_bound;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vec_transform_loop ===");
  /* Peel the loop if there are data refs with unknown alignment.
     Peeling can force the alignment of at most one such data ref.  */
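  /* E.g. (an illustration): for 16-byte vectors of ints, a loop whose
     accesses start at a[1] can be peeled by 3 scalar iterations so that
     the vector loop starts at a[4], which is 16-byte aligned whenever
     'a' itself is.  */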
  if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
    vect_do_peeling_for_alignment (loop_vinfo);

  do_peeling_for_loop_bound
    = (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
       || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
           && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
      || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    vect_loop_versioning (loop_vinfo,
                          !do_peeling_for_loop_bound,
                          &cond_expr, &cond_expr_stmt_list);
  /* If the loop has a symbolic number of iterations 'n' (i.e. it's not a
     compile time constant), or it is a constant that doesn't divide by the
     vectorization factor, then an epilog loop needs to be created.
     We therefore duplicate the loop: the original loop will be vectorized,
     and will compute the first (n/VF) iterations.  The second copy of the loop
     will remain scalar and will compute the remaining (n%VF) iterations.
     (VF is the vectorization factor).  */
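  /* E.g. (an illustration): for n = 102 and VF = 4, the vectorized loop
     executes the first 100 iterations (25 vector iterations) and the
     scalar epilog loop executes the remaining 102 % 4 = 2 iterations.  */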
  if (do_peeling_for_loop_bound)
    vect_do_peeling_for_loop_bound (loop_vinfo, &ratio,
                                    cond_expr, cond_expr_stmt_list);
  else
    ratio = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
                           LOOP_VINFO_INT_NITERS (loop_vinfo)
                           / vectorization_factor);
  /* 1) Make sure the loop header has exactly two entries
     2) Make sure we have a preheader basic block.  */

  gcc_assert (EDGE_COUNT (loop->header->preds) == 2);

  split_edge (loop_preheader_edge (loop));

  /* FORNOW: the vectorizer supports only loops whose body consists
     of one basic block (header + empty latch).  When the vectorizer
     supports more involved loop forms, the order by which the BBs are
     traversed needs to be reconsidered.  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      stmt_vec_info stmt_info;
      gimple phi;

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing phi: ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }
          stmt_info = vinfo_for_stmt (phi);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (MAY_HAVE_DEBUG_STMTS)
                vect_loop_kill_debug_uses (loop, phi);
              continue;
            }

          if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
               != (unsigned HOST_WIDE_INT) vectorization_factor)
              && vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "multiple-types.");

          if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "transform phi.");
              vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
            }
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si);)
        {
          gimple stmt = gsi_stmt (si);
          bool is_store;

          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "------>vectorizing statement: ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          stmt_info = vinfo_for_stmt (stmt);

          /* vector stmts created in the outer-loop during vectorization of
             stmts in an inner-loop may not have a stmt_info, and do not
             need to be vectorized.  */
          if (!stmt_info)
            {
              gsi_next (&si);
              continue;
            }

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
            {
              if (MAY_HAVE_DEBUG_STMTS)
                vect_loop_kill_debug_uses (loop, stmt);
              gsi_next (&si);
              continue;
            }

          gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
          nunits =
            (unsigned int) TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
          if (!STMT_SLP_TYPE (stmt_info)
              && nunits != (unsigned int) vectorization_factor
              && vect_print_dump_info (REPORT_DETAILS))
            /* For SLP VF is set according to unrolling factor, and not to
               vector size, hence for SLP this print is not valid.  */
            fprintf (vect_dump, "multiple-types.");

          /* SLP.  Schedule all the SLP instances when the first SLP stmt is
             reached.  */
          if (STMT_SLP_TYPE (stmt_info))
            {
              if (!slp_scheduled)
                {
                  slp_scheduled = true;

                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "=== scheduling SLP instances ===");

                  vect_schedule_slp (loop_vinfo, NULL);
                }

              /* Hybrid SLP stmts must be vectorized in addition to SLP.  */
              if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
                {
                  gsi_next (&si);
                  continue;
                }
            }

          /* -------- vectorize statement ------------ */
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "transform statement.");

          strided_store = false;
          is_store = vect_transform_stmt (stmt, &si, &strided_store, NULL, NULL);
          if (is_store)
            {
              if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
                {
                  /* Interleaving.  If IS_STORE is TRUE, the vectorization of
                     the interleaving chain was completed - free all the
                     stores in the chain.  */
                  vect_remove_stores (DR_GROUP_FIRST_DR (stmt_info));
                  gsi_remove (&si, true);
                  continue;
                }
              else
                {
                  /* Free the attached stmt_vec_info and remove the stmt.  */
                  free_stmt_vec_info (stmt);
                  gsi_remove (&si, true);
                  continue;
                }
            }
          gsi_next (&si);
        }                       /* stmts in BB */
    }                           /* BBs in loop */
  slpeel_make_loop_iterate_ntimes (loop, ratio);

  /* The memory tags and pointers in vectorized statements need to
     have their SSA forms updated.  FIXME, why can't this be delayed
     until all the loops have been transformed?  */
  update_ssa (TODO_update_ssa);

  if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
    fprintf (vect_dump, "LOOP VECTORIZED.");
  if (loop->inner && vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
    fprintf (vect_dump, "OUTER LOOP VECTORIZED.");
}