1 /* Analysis Utilities for Loop Vectorization.
2 Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
28 #include "basic-block.h"
29 #include "diagnostic.h"
30 #include "tree-flow.h"
31 #include "tree-dump.h"
37 #include "tree-chrec.h"
38 #include "tree-data-ref.h"
39 #include "tree-scalar-evolution.h"
40 #include "tree-vectorizer.h"
44 static bool vect_can_advance_ivs_p (loop_vec_info
);
46 /* Function vect_determine_vectorization_factor
48 Determine the vectorization factor (VF). VF is the number of data elements
49 that are operated upon in parallel in a single iteration of the vectorized
50 loop. For example, when vectorizing a loop that operates on 4byte elements,
51 on a target with vector size (VS) 16byte, the VF is set to 4, since 4
52 elements can fit in a single vector register.
54 We currently support vectorization of loops in which all types operated upon
55 are of the same size. Therefore this function currently sets VF according to
56 the size of the types operated upon, and fails if there are multiple sizes
59 VF is also the factor by which the loop iterations are strip-mined, e.g.:
66 for (i=0; i<N; i+=VF){
67 a[i:VF] = b[i:VF] + c[i:VF];
72 vect_determine_vectorization_factor (loop_vec_info loop_vinfo
)
74 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
75 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
76 int nbbs
= loop
->num_nodes
;
77 block_stmt_iterator si
;
78 unsigned int vectorization_factor
= 0;
83 stmt_vec_info stmt_info
;
86 if (vect_print_dump_info (REPORT_DETAILS
))
87 fprintf (vect_dump
, "=== vect_determine_vectorization_factor ===");
89 for (i
= 0; i
< nbbs
; i
++)
91 basic_block bb
= bbs
[i
];
93 for (phi
= phi_nodes (bb
); phi
; phi
= PHI_CHAIN (phi
))
95 stmt_info
= vinfo_for_stmt (phi
);
96 if (vect_print_dump_info (REPORT_DETAILS
))
98 fprintf (vect_dump
, "==> examining phi: ");
99 print_generic_expr (vect_dump
, phi
, TDF_SLIM
);
102 gcc_assert (stmt_info
);
104 if (STMT_VINFO_RELEVANT_P (stmt_info
))
106 gcc_assert (!STMT_VINFO_VECTYPE (stmt_info
));
107 scalar_type
= TREE_TYPE (PHI_RESULT (phi
));
109 if (vect_print_dump_info (REPORT_DETAILS
))
111 fprintf (vect_dump
, "get vectype for scalar type: ");
112 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
115 vectype
= get_vectype_for_scalar_type (scalar_type
);
118 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
121 "not vectorized: unsupported data-type ");
122 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
126 STMT_VINFO_VECTYPE (stmt_info
) = vectype
;
128 if (vect_print_dump_info (REPORT_DETAILS
))
130 fprintf (vect_dump
, "vectype: ");
131 print_generic_expr (vect_dump
, vectype
, TDF_SLIM
);
134 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
135 if (vect_print_dump_info (REPORT_DETAILS
))
136 fprintf (vect_dump
, "nunits = %d", nunits
);
138 if (!vectorization_factor
139 || (nunits
> vectorization_factor
))
140 vectorization_factor
= nunits
;
144 for (si
= bsi_start (bb
); !bsi_end_p (si
); bsi_next (&si
))
146 tree stmt
= bsi_stmt (si
);
147 stmt_info
= vinfo_for_stmt (stmt
);
149 if (vect_print_dump_info (REPORT_DETAILS
))
151 fprintf (vect_dump
, "==> examining statement: ");
152 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
155 gcc_assert (stmt_info
);
157 /* skip stmts which do not need to be vectorized. */
158 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
159 && !STMT_VINFO_LIVE_P (stmt_info
))
161 if (vect_print_dump_info (REPORT_DETAILS
))
162 fprintf (vect_dump
, "skip.");
166 if (TREE_CODE (stmt
) != GIMPLE_MODIFY_STMT
)
168 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
170 fprintf (vect_dump
, "not vectorized: irregular stmt.");
171 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
176 if (!GIMPLE_STMT_P (stmt
)
177 && VECTOR_MODE_P (TYPE_MODE (TREE_TYPE (stmt
))))
179 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
181 fprintf (vect_dump
, "not vectorized: vector stmt in loop:");
182 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
187 if (STMT_VINFO_VECTYPE (stmt_info
))
189 /* The only case when a vectype had been already set is for stmts
190 that contain a dataref, or for "pattern-stmts" (stmts generated
191 by the vectorizer to represent/replace a certain idiom). */
192 gcc_assert (STMT_VINFO_DATA_REF (stmt_info
)
193 || is_pattern_stmt_p (stmt_info
));
194 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
200 gcc_assert (! STMT_VINFO_DATA_REF (stmt_info
)
201 && !is_pattern_stmt_p (stmt_info
));
203 /* We generally set the vectype according to the type of the
205 For stmts whose result-type is different than the type of the
206 arguments (e.g. demotion, promotion), vectype will be reset
207 appropriately (later). Note that we have to visit the smallest
208 datatype in this function, because that determines the VF.
209 If the smallest datatype in the loop is present only as the
210 rhs of a promotion operation - we'd miss it here.
211 Such a case, where a variable of this datatype does not appear
212 in the lhs anywhere in the loop, can only occur if it's an
213 invariant: e.g.: 'int_x = (int) short_inv', which we'd expect
214 to have been optimized away by invariant motion. However, we
215 cannot rely on invariant motion to always take invariants out
216 of the loop, and so in the case of promotion we also have to
218 scalar_type
= TREE_TYPE (GIMPLE_STMT_OPERAND (stmt
, 0));
220 operation
= GIMPLE_STMT_OPERAND (stmt
, 1);
221 if (CONVERT_EXPR_P (operation
)
222 || TREE_CODE (operation
) == WIDEN_MULT_EXPR
223 || TREE_CODE (operation
) == FLOAT_EXPR
)
225 tree rhs_type
= TREE_TYPE (TREE_OPERAND (operation
, 0));
226 if (TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type
)) <
227 TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type
)))
228 scalar_type
= rhs_type
;
231 if (vect_print_dump_info (REPORT_DETAILS
))
233 fprintf (vect_dump
, "get vectype for scalar type: ");
234 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
237 vectype
= get_vectype_for_scalar_type (scalar_type
);
240 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
243 "not vectorized: unsupported data-type ");
244 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
248 STMT_VINFO_VECTYPE (stmt_info
) = vectype
;
251 if (vect_print_dump_info (REPORT_DETAILS
))
253 fprintf (vect_dump
, "vectype: ");
254 print_generic_expr (vect_dump
, vectype
, TDF_SLIM
);
257 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
258 if (vect_print_dump_info (REPORT_DETAILS
))
259 fprintf (vect_dump
, "nunits = %d", nunits
);
261 if (!vectorization_factor
262 || (nunits
> vectorization_factor
))
263 vectorization_factor
= nunits
;
268 /* TODO: Analyze cost. Decide if worth while to vectorize. */
269 if (vect_print_dump_info (REPORT_DETAILS
))
270 fprintf (vect_dump
, "vectorization factor = %d", vectorization_factor
);
271 if (vectorization_factor
<= 1)
273 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
274 fprintf (vect_dump
, "not vectorized: unsupported data-type");
277 LOOP_VINFO_VECT_FACTOR (loop_vinfo
) = vectorization_factor
;
283 /* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
284 the number of created vector stmts depends on the unrolling factor). However,
285 the actual number of vector stmts for every SLP node depends on VF which is
286 set later in vect_analyze_operations(). Hence, SLP costs should be updated.
287 In this function we assume that the inside costs calculated in
288 vect_model_xxx_cost are linear in ncopies. */
291 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo
)
293 unsigned int i
, vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
294 VEC (slp_instance
, heap
) *slp_instances
= LOOP_VINFO_SLP_INSTANCES (loop_vinfo
);
295 slp_instance instance
;
297 if (vect_print_dump_info (REPORT_SLP
))
298 fprintf (vect_dump
, "=== vect_update_slp_costs_according_to_vf ===");
300 for (i
= 0; VEC_iterate (slp_instance
, slp_instances
, i
, instance
); i
++)
301 /* We assume that costs are linear in ncopies. */
302 SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance
) *= vf
303 / SLP_INSTANCE_UNROLLING_FACTOR (instance
);
307 /* Function vect_analyze_operations.
309 Scan the loop stmts and make sure they are all vectorizable. */
312 vect_analyze_operations (loop_vec_info loop_vinfo
)
314 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
315 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
316 int nbbs
= loop
->num_nodes
;
317 block_stmt_iterator si
;
318 unsigned int vectorization_factor
= 0;
322 stmt_vec_info stmt_info
;
323 bool need_to_vectorize
= false;
324 int min_profitable_iters
;
325 int min_scalar_loop_bound
;
327 bool only_slp_in_loop
= true;
329 if (vect_print_dump_info (REPORT_DETAILS
))
330 fprintf (vect_dump
, "=== vect_analyze_operations ===");
332 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo
));
333 vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
335 for (i
= 0; i
< nbbs
; i
++)
337 basic_block bb
= bbs
[i
];
339 for (phi
= phi_nodes (bb
); phi
; phi
= PHI_CHAIN (phi
))
343 stmt_info
= vinfo_for_stmt (phi
);
344 if (vect_print_dump_info (REPORT_DETAILS
))
346 fprintf (vect_dump
, "examining phi: ");
347 print_generic_expr (vect_dump
, phi
, TDF_SLIM
);
350 if (! is_loop_header_bb_p (bb
))
352 /* inner-loop loop-closed exit phi in outer-loop vectorization
353 (i.e. a phi in the tail of the outer-loop).
354 FORNOW: we currently don't support the case that these phis
355 are not used in the outerloop, cause this case requires
356 to actually do something here. */
357 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
358 || STMT_VINFO_LIVE_P (stmt_info
))
360 if (vect_print_dump_info (REPORT_DETAILS
))
362 "Unsupported loop-closed phi in outer-loop.");
368 gcc_assert (stmt_info
);
370 if (STMT_VINFO_LIVE_P (stmt_info
))
372 /* FORNOW: not yet supported. */
373 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
374 fprintf (vect_dump
, "not vectorized: value used after loop.");
378 if (STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_loop
379 && STMT_VINFO_DEF_TYPE (stmt_info
) != vect_induction_def
)
381 /* A scalar-dependence cycle that we don't support. */
382 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
383 fprintf (vect_dump
, "not vectorized: scalar dependence cycle.");
387 if (STMT_VINFO_RELEVANT_P (stmt_info
))
389 need_to_vectorize
= true;
390 if (STMT_VINFO_DEF_TYPE (stmt_info
) == vect_induction_def
)
391 ok
= vectorizable_induction (phi
, NULL
, NULL
);
396 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
399 "not vectorized: relevant phi not supported: ");
400 print_generic_expr (vect_dump
, phi
, TDF_SLIM
);
406 for (si
= bsi_start (bb
); !bsi_end_p (si
); bsi_next (&si
))
408 tree stmt
= bsi_stmt (si
);
409 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
410 enum vect_def_type relevance
= STMT_VINFO_RELEVANT (stmt_info
);
412 if (vect_print_dump_info (REPORT_DETAILS
))
414 fprintf (vect_dump
, "==> examining statement: ");
415 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
418 gcc_assert (stmt_info
);
420 /* skip stmts which do not need to be vectorized.
421 this is expected to include:
422 - the COND_EXPR which is the loop exit condition
423 - any LABEL_EXPRs in the loop
424 - computations that are used only for array indexing or loop
427 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
428 && !STMT_VINFO_LIVE_P (stmt_info
))
430 if (vect_print_dump_info (REPORT_DETAILS
))
431 fprintf (vect_dump
, "irrelevant.");
435 switch (STMT_VINFO_DEF_TYPE (stmt_info
))
440 case vect_reduction_def
:
441 gcc_assert (relevance
== vect_used_in_outer
442 || relevance
== vect_used_in_outer_by_reduction
443 || relevance
== vect_unused_in_loop
);
446 case vect_induction_def
:
447 case vect_constant_def
:
448 case vect_invariant_def
:
449 case vect_unknown_def_type
:
454 if (STMT_VINFO_RELEVANT_P (stmt_info
))
456 gcc_assert (GIMPLE_STMT_P (stmt
)
457 || !VECTOR_MODE_P (TYPE_MODE (TREE_TYPE (stmt
))));
458 gcc_assert (STMT_VINFO_VECTYPE (stmt_info
));
459 need_to_vectorize
= true;
463 if (STMT_VINFO_RELEVANT_P (stmt_info
)
464 || STMT_VINFO_DEF_TYPE (stmt_info
) == vect_reduction_def
)
465 ok
= (vectorizable_type_promotion (stmt
, NULL
, NULL
)
466 || vectorizable_type_demotion (stmt
, NULL
, NULL
)
467 || vectorizable_conversion (stmt
, NULL
, NULL
, NULL
)
468 || vectorizable_operation (stmt
, NULL
, NULL
, NULL
)
469 || vectorizable_assignment (stmt
, NULL
, NULL
, NULL
)
470 || vectorizable_load (stmt
, NULL
, NULL
, NULL
)
471 || vectorizable_call (stmt
, NULL
, NULL
)
472 || vectorizable_store (stmt
, NULL
, NULL
, NULL
)
473 || vectorizable_condition (stmt
, NULL
, NULL
)
474 || vectorizable_reduction (stmt
, NULL
, NULL
));
478 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
480 fprintf (vect_dump
, "not vectorized: relevant stmt not ");
481 fprintf (vect_dump
, "supported: ");
482 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
487 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
488 need extra handling, except for vectorizable reductions. */
489 if (STMT_VINFO_LIVE_P (stmt_info
)
490 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
491 ok
= vectorizable_live_operation (stmt
, NULL
, NULL
);
495 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
497 fprintf (vect_dump
, "not vectorized: live stmt not ");
498 fprintf (vect_dump
, "supported: ");
499 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
504 if (!PURE_SLP_STMT (stmt_info
))
506 /* STMT needs loop-based vectorization. */
507 only_slp_in_loop
= false;
509 /* Groups of strided accesses whose size is not a power of 2 are
510 not vectorizable yet using loop-vectorization. Therefore, if
511 this stmt feeds non-SLP-able stmts (i.e., this stmt has to be
512 both SLPed and loop-based vectorzed), the loop cannot be
514 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
)
515 && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
516 DR_GROUP_FIRST_DR (stmt_info
)))) == -1)
518 if (vect_print_dump_info (REPORT_DETAILS
))
520 fprintf (vect_dump
, "not vectorized: the size of group "
521 "of strided accesses is not a power of 2");
522 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
530 /* All operations in the loop are either irrelevant (deal with loop
531 control, or dead), or only used outside the loop and can be moved
532 out of the loop (e.g. invariants, inductions). The loop can be
533 optimized away by scalar optimizations. We're better off not
534 touching this loop. */
535 if (!need_to_vectorize
)
537 if (vect_print_dump_info (REPORT_DETAILS
))
539 "All the computation can be taken out of the loop.");
540 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
542 "not vectorized: redundant loop. no profit to vectorize.");
546 /* If all the stmts in the loop can be SLPed, we perform only SLP, and
547 vectorization factor of the loop is the unrolling factor required by the
548 SLP instances. If that unrolling factor is 1, we say, that we perform
549 pure SLP on loop - cross iteration parallelism is not exploited. */
550 if (only_slp_in_loop
)
551 vectorization_factor
= LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo
);
553 vectorization_factor
= least_common_multiple (vectorization_factor
,
554 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo
));
556 LOOP_VINFO_VECT_FACTOR (loop_vinfo
) = vectorization_factor
;
558 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
)
559 && vect_print_dump_info (REPORT_DETAILS
))
561 "vectorization_factor = %d, niters = " HOST_WIDE_INT_PRINT_DEC
,
562 vectorization_factor
, LOOP_VINFO_INT_NITERS (loop_vinfo
));
564 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
)
565 && (LOOP_VINFO_INT_NITERS (loop_vinfo
) < vectorization_factor
))
567 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
568 fprintf (vect_dump
, "not vectorized: iteration count too small.");
569 if (vect_print_dump_info (REPORT_DETAILS
))
570 fprintf (vect_dump
,"not vectorized: iteration count smaller than "
571 "vectorization factor.");
575 /* Analyze cost. Decide if worth while to vectorize. */
577 /* Once VF is set, SLP costs should be updated since the number of created
578 vector stmts depends on VF. */
579 vect_update_slp_costs_according_to_vf (loop_vinfo
);
581 min_profitable_iters
= vect_estimate_min_profitable_iters (loop_vinfo
);
582 LOOP_VINFO_COST_MODEL_MIN_ITERS (loop_vinfo
) = min_profitable_iters
;
584 if (min_profitable_iters
< 0)
586 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
587 fprintf (vect_dump
, "not vectorized: vectorization not profitable.");
588 if (vect_print_dump_info (REPORT_DETAILS
))
589 fprintf (vect_dump
, "not vectorized: vector version will never be "
594 min_scalar_loop_bound
= ((PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND
)
595 * vectorization_factor
) - 1);
597 /* Use the cost model only if it is more conservative than user specified
600 th
= (unsigned) min_scalar_loop_bound
;
601 if (min_profitable_iters
602 && (!min_scalar_loop_bound
603 || min_profitable_iters
> min_scalar_loop_bound
))
604 th
= (unsigned) min_profitable_iters
;
606 if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
)
607 && LOOP_VINFO_INT_NITERS (loop_vinfo
) <= th
)
609 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
610 fprintf (vect_dump
, "not vectorized: vectorization not "
612 if (vect_print_dump_info (REPORT_DETAILS
))
613 fprintf (vect_dump
, "not vectorized: iteration count smaller than "
614 "user specified loop bound parameter or minimum "
615 "profitable iterations (whichever is more conservative).");
619 if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo
)
620 || LOOP_VINFO_INT_NITERS (loop_vinfo
) % vectorization_factor
!= 0
621 || LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo
))
623 if (vect_print_dump_info (REPORT_DETAILS
))
624 fprintf (vect_dump
, "epilog loop required.");
625 if (!vect_can_advance_ivs_p (loop_vinfo
))
627 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
629 "not vectorized: can't create epilog loop 1.");
632 if (!slpeel_can_duplicate_loop_p (loop
, single_exit (loop
)))
634 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
636 "not vectorized: can't create epilog loop 2.");
645 /* Function exist_non_indexing_operands_for_use_p
647 USE is one of the uses attached to STMT. Check if USE is
648 used in STMT for anything other than indexing an array. */
651 exist_non_indexing_operands_for_use_p (tree use
, tree stmt
)
654 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
656 /* USE corresponds to some operand in STMT. If there is no data
657 reference in STMT, then any operand that corresponds to USE
658 is not indexing an array. */
659 if (!STMT_VINFO_DATA_REF (stmt_info
))
662 /* STMT has a data_ref. FORNOW this means that its of one of
666 (This should have been verified in analyze_data_refs).
668 'var' in the second case corresponds to a def, not a use,
669 so USE cannot correspond to any operands that are not used
672 Therefore, all we need to check is if STMT falls into the
673 first case, and whether var corresponds to USE. */
675 if (TREE_CODE (GIMPLE_STMT_OPERAND (stmt
, 0)) == SSA_NAME
)
678 operand
= GIMPLE_STMT_OPERAND (stmt
, 1);
680 if (TREE_CODE (operand
) != SSA_NAME
)
690 /* Function vect_analyze_scalar_cycles_1.
692 Examine the cross iteration def-use cycles of scalar variables
693 in LOOP. LOOP_VINFO represents the loop that is noe being
694 considered for vectorization (can be LOOP, or an outer-loop
698 vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo
, struct loop
*loop
)
701 basic_block bb
= loop
->header
;
703 VEC(tree
,heap
) *worklist
= VEC_alloc (tree
, heap
, 64);
705 if (vect_print_dump_info (REPORT_DETAILS
))
706 fprintf (vect_dump
, "=== vect_analyze_scalar_cycles ===");
708 /* First - identify all inductions. */
709 for (phi
= phi_nodes (bb
); phi
; phi
= PHI_CHAIN (phi
))
711 tree access_fn
= NULL
;
712 tree def
= PHI_RESULT (phi
);
713 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (phi
);
715 if (vect_print_dump_info (REPORT_DETAILS
))
717 fprintf (vect_dump
, "Analyze phi: ");
718 print_generic_expr (vect_dump
, phi
, TDF_SLIM
);
721 /* Skip virtual phi's. The data dependences that are associated with
722 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
723 if (!is_gimple_reg (SSA_NAME_VAR (def
)))
726 STMT_VINFO_DEF_TYPE (stmt_vinfo
) = vect_unknown_def_type
;
728 /* Analyze the evolution function. */
729 access_fn
= analyze_scalar_evolution (loop
, def
);
730 if (access_fn
&& vect_print_dump_info (REPORT_DETAILS
))
732 fprintf (vect_dump
, "Access function of PHI: ");
733 print_generic_expr (vect_dump
, access_fn
, TDF_SLIM
);
737 || !vect_is_simple_iv_evolution (loop
->num
, access_fn
, &dumy
, &dumy
))
739 VEC_safe_push (tree
, heap
, worklist
, phi
);
743 if (vect_print_dump_info (REPORT_DETAILS
))
744 fprintf (vect_dump
, "Detected induction.");
745 STMT_VINFO_DEF_TYPE (stmt_vinfo
) = vect_induction_def
;
749 /* Second - identify all reductions. */
750 while (VEC_length (tree
, worklist
) > 0)
752 tree phi
= VEC_pop (tree
, worklist
);
753 tree def
= PHI_RESULT (phi
);
754 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (phi
);
757 if (vect_print_dump_info (REPORT_DETAILS
))
759 fprintf (vect_dump
, "Analyze phi: ");
760 print_generic_expr (vect_dump
, phi
, TDF_SLIM
);
763 gcc_assert (is_gimple_reg (SSA_NAME_VAR (def
)));
764 gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_unknown_def_type
);
766 reduc_stmt
= vect_is_simple_reduction (loop_vinfo
, phi
);
769 if (vect_print_dump_info (REPORT_DETAILS
))
770 fprintf (vect_dump
, "Detected reduction.");
771 STMT_VINFO_DEF_TYPE (stmt_vinfo
) = vect_reduction_def
;
772 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt
)) =
776 if (vect_print_dump_info (REPORT_DETAILS
))
777 fprintf (vect_dump
, "Unknown def-use cycle pattern.");
780 VEC_free (tree
, heap
, worklist
);
785 /* Function vect_analyze_scalar_cycles.
787 Examine the cross iteration def-use cycles of scalar variables, by
788 analyzing the loop-header PHIs of scalar variables; Classify each
789 cycle as one of the following: invariant, induction, reduction, unknown.
790 We do that for the loop represented by LOOP_VINFO, and also to its
791 inner-loop, if exists.
792 Examples for scalar cycles:
807 vect_analyze_scalar_cycles (loop_vec_info loop_vinfo
)
809 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
811 vect_analyze_scalar_cycles_1 (loop_vinfo
, loop
);
813 /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
814 Reductions in such inner-loop therefore have different properties than
815 the reductions in the nest that gets vectorized:
816 1. When vectorized, they are executed in the same order as in the original
817 scalar loop, so we can't change the order of computation when
819 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
820 current checks are too strict. */
823 vect_analyze_scalar_cycles_1 (loop_vinfo
, loop
->inner
);
827 /* Function vect_insert_into_interleaving_chain.
829 Insert DRA into the interleaving chain of DRB according to DRA's INIT. */
832 vect_insert_into_interleaving_chain (struct data_reference
*dra
,
833 struct data_reference
*drb
)
835 tree prev
, next
, next_init
;
836 stmt_vec_info stmtinfo_a
= vinfo_for_stmt (DR_STMT (dra
));
837 stmt_vec_info stmtinfo_b
= vinfo_for_stmt (DR_STMT (drb
));
839 prev
= DR_GROUP_FIRST_DR (stmtinfo_b
);
840 next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
));
843 next_init
= DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next
)));
844 if (tree_int_cst_compare (next_init
, DR_INIT (dra
)) > 0)
847 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
)) = DR_STMT (dra
);
848 DR_GROUP_NEXT_DR (stmtinfo_a
) = next
;
852 next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
));
855 /* We got to the end of the list. Insert here. */
856 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
)) = DR_STMT (dra
);
857 DR_GROUP_NEXT_DR (stmtinfo_a
) = NULL_TREE
;
861 /* Function vect_update_interleaving_chain.
863 For two data-refs DRA and DRB that are a part of a chain interleaved data
864 accesses, update the interleaving chain. DRB's INIT is smaller than DRA's.
866 There are four possible cases:
867 1. New stmts - both DRA and DRB are not a part of any chain:
870 2. DRB is a part of a chain and DRA is not:
871 no need to update FIRST_DR
872 no need to insert DRB
873 insert DRA according to init
874 3. DRA is a part of a chain and DRB is not:
875 if (init of FIRST_DR > init of DRB)
877 NEXT(FIRST_DR) = previous FIRST_DR
879 insert DRB according to its init
880 4. both DRA and DRB are in some interleaving chains:
881 choose the chain with the smallest init of FIRST_DR
882 insert the nodes of the second chain into the first one. */
885 vect_update_interleaving_chain (struct data_reference
*drb
,
886 struct data_reference
*dra
)
888 stmt_vec_info stmtinfo_a
= vinfo_for_stmt (DR_STMT (dra
));
889 stmt_vec_info stmtinfo_b
= vinfo_for_stmt (DR_STMT (drb
));
890 tree next_init
, init_dra_chain
, init_drb_chain
, first_a
, first_b
;
891 tree node
, prev
, next
, node_init
, first_stmt
;
893 /* 1. New stmts - both DRA and DRB are not a part of any chain. */
894 if (!DR_GROUP_FIRST_DR (stmtinfo_a
) && !DR_GROUP_FIRST_DR (stmtinfo_b
))
896 DR_GROUP_FIRST_DR (stmtinfo_a
) = DR_STMT (drb
);
897 DR_GROUP_FIRST_DR (stmtinfo_b
) = DR_STMT (drb
);
898 DR_GROUP_NEXT_DR (stmtinfo_b
) = DR_STMT (dra
);
902 /* 2. DRB is a part of a chain and DRA is not. */
903 if (!DR_GROUP_FIRST_DR (stmtinfo_a
) && DR_GROUP_FIRST_DR (stmtinfo_b
))
905 DR_GROUP_FIRST_DR (stmtinfo_a
) = DR_GROUP_FIRST_DR (stmtinfo_b
);
906 /* Insert DRA into the chain of DRB. */
907 vect_insert_into_interleaving_chain (dra
, drb
);
911 /* 3. DRA is a part of a chain and DRB is not. */
912 if (DR_GROUP_FIRST_DR (stmtinfo_a
) && !DR_GROUP_FIRST_DR (stmtinfo_b
))
914 tree old_first_stmt
= DR_GROUP_FIRST_DR (stmtinfo_a
);
915 tree init_old
= DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (
919 if (tree_int_cst_compare (init_old
, DR_INIT (drb
)) > 0)
921 /* DRB's init is smaller than the init of the stmt previously marked
922 as the first stmt of the interleaving chain of DRA. Therefore, we
923 update FIRST_STMT and put DRB in the head of the list. */
924 DR_GROUP_FIRST_DR (stmtinfo_b
) = DR_STMT (drb
);
925 DR_GROUP_NEXT_DR (stmtinfo_b
) = old_first_stmt
;
927 /* Update all the stmts in the list to point to the new FIRST_STMT. */
928 tmp
= old_first_stmt
;
931 DR_GROUP_FIRST_DR (vinfo_for_stmt (tmp
)) = DR_STMT (drb
);
932 tmp
= DR_GROUP_NEXT_DR (vinfo_for_stmt (tmp
));
937 /* Insert DRB in the list of DRA. */
938 vect_insert_into_interleaving_chain (drb
, dra
);
939 DR_GROUP_FIRST_DR (stmtinfo_b
) = DR_GROUP_FIRST_DR (stmtinfo_a
);
944 /* 4. both DRA and DRB are in some interleaving chains. */
945 first_a
= DR_GROUP_FIRST_DR (stmtinfo_a
);
946 first_b
= DR_GROUP_FIRST_DR (stmtinfo_b
);
947 if (first_a
== first_b
)
949 init_dra_chain
= DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_a
)));
950 init_drb_chain
= DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_b
)));
952 if (tree_int_cst_compare (init_dra_chain
, init_drb_chain
) > 0)
954 /* Insert the nodes of DRA chain into the DRB chain.
955 After inserting a node, continue from this node of the DRB chain (don't
956 start from the beginning. */
957 node
= DR_GROUP_FIRST_DR (stmtinfo_a
);
958 prev
= DR_GROUP_FIRST_DR (stmtinfo_b
);
959 first_stmt
= first_b
;
963 /* Insert the nodes of DRB chain into the DRA chain.
964 After inserting a node, continue from this node of the DRA chain (don't
965 start from the beginning. */
966 node
= DR_GROUP_FIRST_DR (stmtinfo_b
);
967 prev
= DR_GROUP_FIRST_DR (stmtinfo_a
);
968 first_stmt
= first_a
;
973 node_init
= DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (node
)));
974 next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
));
977 next_init
= DR_INIT (STMT_VINFO_DATA_REF (vinfo_for_stmt (next
)));
978 if (tree_int_cst_compare (next_init
, node_init
) > 0)
981 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
)) = node
;
982 DR_GROUP_NEXT_DR (vinfo_for_stmt (node
)) = next
;
987 next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
));
991 /* We got to the end of the list. Insert here. */
992 DR_GROUP_NEXT_DR (vinfo_for_stmt (prev
)) = node
;
993 DR_GROUP_NEXT_DR (vinfo_for_stmt (node
)) = NULL_TREE
;
996 DR_GROUP_FIRST_DR (vinfo_for_stmt (node
)) = first_stmt
;
997 node
= DR_GROUP_NEXT_DR (vinfo_for_stmt (node
));
1002 /* Function vect_equal_offsets.
1004 Check if OFFSET1 and OFFSET2 are identical expressions. */
1007 vect_equal_offsets (tree offset1
, tree offset2
)
1011 STRIP_NOPS (offset1
);
1012 STRIP_NOPS (offset2
);
1014 if (offset1
== offset2
)
1017 if (TREE_CODE (offset1
) != TREE_CODE (offset2
)
1018 || !BINARY_CLASS_P (offset1
)
1019 || !BINARY_CLASS_P (offset2
))
1022 res0
= vect_equal_offsets (TREE_OPERAND (offset1
, 0),
1023 TREE_OPERAND (offset2
, 0));
1024 res1
= vect_equal_offsets (TREE_OPERAND (offset1
, 1),
1025 TREE_OPERAND (offset2
, 1));
1027 return (res0
&& res1
);
1031 /* Function vect_check_interleaving.
1033 Check if DRA and DRB are a part of interleaving. In case they are, insert
1034 DRA and DRB in an interleaving chain. */
1037 vect_check_interleaving (struct data_reference
*dra
,
1038 struct data_reference
*drb
)
1040 HOST_WIDE_INT type_size_a
, type_size_b
, diff_mod_size
, step
, init_a
, init_b
;
1042 /* Check that the data-refs have same first location (except init) and they
1043 are both either store or load (not load and store). */
1044 if ((DR_BASE_ADDRESS (dra
) != DR_BASE_ADDRESS (drb
)
1045 && (TREE_CODE (DR_BASE_ADDRESS (dra
)) != ADDR_EXPR
1046 || TREE_CODE (DR_BASE_ADDRESS (drb
)) != ADDR_EXPR
1047 || TREE_OPERAND (DR_BASE_ADDRESS (dra
), 0)
1048 != TREE_OPERAND (DR_BASE_ADDRESS (drb
),0)))
1049 || !vect_equal_offsets (DR_OFFSET (dra
), DR_OFFSET (drb
))
1050 || !tree_int_cst_compare (DR_INIT (dra
), DR_INIT (drb
))
1051 || DR_IS_READ (dra
) != DR_IS_READ (drb
))
1055 1. data-refs are of the same type
1056 2. their steps are equal
1057 3. the step is greater than the difference between data-refs' inits */
1058 type_size_a
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra
))));
1059 type_size_b
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb
))));
1061 if (type_size_a
!= type_size_b
1062 || tree_int_cst_compare (DR_STEP (dra
), DR_STEP (drb
))
1063 || !types_compatible_p (TREE_TYPE (DR_REF (dra
)),
1064 TREE_TYPE (DR_REF (drb
))))
1067 init_a
= TREE_INT_CST_LOW (DR_INIT (dra
));
1068 init_b
= TREE_INT_CST_LOW (DR_INIT (drb
));
1069 step
= TREE_INT_CST_LOW (DR_STEP (dra
));
1071 if (init_a
> init_b
)
1073 /* If init_a == init_b + the size of the type * k, we have an interleaving,
1074 and DRB is accessed before DRA. */
1075 diff_mod_size
= (init_a
- init_b
) % type_size_a
;
1077 if ((init_a
- init_b
) > step
)
1080 if (diff_mod_size
== 0)
1082 vect_update_interleaving_chain (drb
, dra
);
1083 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1085 fprintf (vect_dump
, "Detected interleaving ");
1086 print_generic_expr (vect_dump
, DR_REF (dra
), TDF_SLIM
);
1087 fprintf (vect_dump
, " and ");
1088 print_generic_expr (vect_dump
, DR_REF (drb
), TDF_SLIM
);
1095 /* If init_b == init_a + the size of the type * k, we have an
1096 interleaving, and DRA is accessed before DRB. */
1097 diff_mod_size
= (init_b
- init_a
) % type_size_a
;
1099 if ((init_b
- init_a
) > step
)
1102 if (diff_mod_size
== 0)
1104 vect_update_interleaving_chain (dra
, drb
);
1105 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1107 fprintf (vect_dump
, "Detected interleaving ");
1108 print_generic_expr (vect_dump
, DR_REF (dra
), TDF_SLIM
);
1109 fprintf (vect_dump
, " and ");
1110 print_generic_expr (vect_dump
, DR_REF (drb
), TDF_SLIM
);
1117 /* Check if data references pointed by DR_I and DR_J are same or
1118 belong to same interleaving group. Return FALSE if drs are
1119 different, otherwise return TRUE. */
1122 vect_same_range_drs (data_reference_p dr_i
, data_reference_p dr_j
)
1124 tree stmt_i
= DR_STMT (dr_i
);
1125 tree stmt_j
= DR_STMT (dr_j
);
1127 if (operand_equal_p (DR_REF (dr_i
), DR_REF (dr_j
), 0)
1128 || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i
))
1129 && DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j
))
1130 && (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_i
))
1131 == DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt_j
)))))
1137 /* If address ranges represented by DDR_I and DDR_J are equal,
1138 return TRUE, otherwise return FALSE. */
1141 vect_vfa_range_equal (ddr_p ddr_i
, ddr_p ddr_j
)
1143 if ((vect_same_range_drs (DDR_A (ddr_i
), DDR_A (ddr_j
))
1144 && vect_same_range_drs (DDR_B (ddr_i
), DDR_B (ddr_j
)))
1145 || (vect_same_range_drs (DDR_A (ddr_i
), DDR_B (ddr_j
))
1146 && vect_same_range_drs (DDR_B (ddr_i
), DDR_A (ddr_j
))))
1152 /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
1153 tested at run-time. Return TRUE if DDR was successfully inserted.
1154 Return false if versioning is not supported. */
1157 vect_mark_for_runtime_alias_test (ddr_p ddr
, loop_vec_info loop_vinfo
)
1159 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1161 if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS
) == 0)
1164 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1166 fprintf (vect_dump
, "mark for run-time aliasing test between ");
1167 print_generic_expr (vect_dump
, DR_REF (DDR_A (ddr
)), TDF_SLIM
);
1168 fprintf (vect_dump
, " and ");
1169 print_generic_expr (vect_dump
, DR_REF (DDR_B (ddr
)), TDF_SLIM
);
1174 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1175 fprintf (vect_dump
, "versioning not supported when optimizing for size.");
1179 /* FORNOW: We don't support versioning with outer-loop vectorization. */
1182 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1183 fprintf (vect_dump
, "versioning not yet supported for outer-loops.");
1187 VEC_safe_push (ddr_p
, heap
, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
), ddr
);
1191 /* Function vect_analyze_data_ref_dependence.
1193 Return TRUE if there (might) exist a dependence between a memory-reference
1194 DRA and a memory-reference DRB. When versioning for alias may check a
1195 dependence at run-time, return FALSE. */
1198 vect_analyze_data_ref_dependence (struct data_dependence_relation
*ddr
,
1199 loop_vec_info loop_vinfo
)
1202 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1203 int vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
1204 struct data_reference
*dra
= DDR_A (ddr
);
1205 struct data_reference
*drb
= DDR_B (ddr
);
1206 stmt_vec_info stmtinfo_a
= vinfo_for_stmt (DR_STMT (dra
));
1207 stmt_vec_info stmtinfo_b
= vinfo_for_stmt (DR_STMT (drb
));
1208 int dra_size
= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dra
))));
1209 int drb_size
= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (drb
))));
1210 lambda_vector dist_v
;
1211 unsigned int loop_depth
;
1213 if (DDR_ARE_DEPENDENT (ddr
) == chrec_known
)
1215 /* Independent data accesses. */
1216 vect_check_interleaving (dra
, drb
);
1220 if ((DR_IS_READ (dra
) && DR_IS_READ (drb
)) || dra
== drb
)
1223 if (DDR_ARE_DEPENDENT (ddr
) == chrec_dont_know
)
1225 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1228 "versioning for alias required: can't determine dependence between ");
1229 print_generic_expr (vect_dump
, DR_REF (dra
), TDF_SLIM
);
1230 fprintf (vect_dump
, " and ");
1231 print_generic_expr (vect_dump
, DR_REF (drb
), TDF_SLIM
);
1233 /* Add to list of ddrs that need to be tested at run-time. */
1234 return !vect_mark_for_runtime_alias_test (ddr
, loop_vinfo
);
1237 if (DDR_NUM_DIST_VECTS (ddr
) == 0)
1239 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1241 fprintf (vect_dump
, "versioning for alias required: bad dist vector for ");
1242 print_generic_expr (vect_dump
, DR_REF (dra
), TDF_SLIM
);
1243 fprintf (vect_dump
, " and ");
1244 print_generic_expr (vect_dump
, DR_REF (drb
), TDF_SLIM
);
1246 /* Add to list of ddrs that need to be tested at run-time. */
1247 return !vect_mark_for_runtime_alias_test (ddr
, loop_vinfo
);
1250 loop_depth
= index_in_loop_nest (loop
->num
, DDR_LOOP_NEST (ddr
));
1251 for (i
= 0; VEC_iterate (lambda_vector
, DDR_DIST_VECTS (ddr
), i
, dist_v
); i
++)
1253 int dist
= dist_v
[loop_depth
];
1255 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1256 fprintf (vect_dump
, "dependence distance = %d.", dist
);
1258 /* Same loop iteration. */
1259 if (dist
% vectorization_factor
== 0 && dra_size
== drb_size
)
1261 /* Two references with distance zero have the same alignment. */
1262 VEC_safe_push (dr_p
, heap
, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a
), drb
);
1263 VEC_safe_push (dr_p
, heap
, STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b
), dra
);
1264 if (vect_print_dump_info (REPORT_ALIGNMENT
))
1265 fprintf (vect_dump
, "accesses have the same alignment.");
1266 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1268 fprintf (vect_dump
, "dependence distance modulo vf == 0 between ");
1269 print_generic_expr (vect_dump
, DR_REF (dra
), TDF_SLIM
);
1270 fprintf (vect_dump
, " and ");
1271 print_generic_expr (vect_dump
, DR_REF (drb
), TDF_SLIM
);
1274 /* For interleaving, mark that there is a read-write dependency if
1275 necessary. We check before that one of the data-refs is store. */
1276 if (DR_IS_READ (dra
))
1277 DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_a
) = true;
1280 if (DR_IS_READ (drb
))
1281 DR_GROUP_READ_WRITE_DEPENDENCE (stmtinfo_b
) = true;
1287 if (abs (dist
) >= vectorization_factor
1288 || (dist
> 0 && DDR_REVERSED_P (ddr
)))
1290 /* Dependence distance does not create dependence, as far as
1291 vectorization is concerned, in this case. If DDR_REVERSED_P the
1292 order of the data-refs in DDR was reversed (to make distance
1293 vector positive), and the actual distance is negative. */
1294 if (vect_print_dump_info (REPORT_DR_DETAILS
))
1295 fprintf (vect_dump
, "dependence distance >= VF or negative.");
1299 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
1302 "not vectorized, possible dependence "
1303 "between data-refs ");
1304 print_generic_expr (vect_dump
, DR_REF (dra
), TDF_SLIM
);
1305 fprintf (vect_dump
, " and ");
1306 print_generic_expr (vect_dump
, DR_REF (drb
), TDF_SLIM
);
1315 /* Function vect_analyze_data_ref_dependences.
1317 Examine all the data references in the loop, and make sure there do not
1318 exist any data dependences between them. */
1321 vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo
)
1324 VEC (ddr_p
, heap
) * ddrs
= LOOP_VINFO_DDRS (loop_vinfo
);
1325 struct data_dependence_relation
*ddr
;
1327 if (vect_print_dump_info (REPORT_DETAILS
))
1328 fprintf (vect_dump
, "=== vect_analyze_dependences ===");
1330 for (i
= 0; VEC_iterate (ddr_p
, ddrs
, i
, ddr
); i
++)
1331 if (vect_analyze_data_ref_dependence (ddr
, loop_vinfo
))
1338 /* Function vect_compute_data_ref_alignment
1340 Compute the misalignment of the data reference DR.
1343 1. If during the misalignment computation it is found that the data reference
1344 cannot be vectorized then false is returned.
1345 2. DR_MISALIGNMENT (DR) is defined.
1347 FOR NOW: No analysis is actually performed. Misalignment is calculated
1348 only for trivial cases. TODO. */
1351 vect_compute_data_ref_alignment (struct data_reference
*dr
)
1353 tree stmt
= DR_STMT (dr
);
1354 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1355 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1356 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1357 tree ref
= DR_REF (dr
);
1359 tree base
, base_addr
;
1362 tree aligned_to
, alignment
;
1364 if (vect_print_dump_info (REPORT_DETAILS
))
1365 fprintf (vect_dump
, "vect_compute_data_ref_alignment:");
1367 /* Initialize misalignment to unknown. */
1368 SET_DR_MISALIGNMENT (dr
, -1);
1370 misalign
= DR_INIT (dr
);
1371 aligned_to
= DR_ALIGNED_TO (dr
);
1372 base_addr
= DR_BASE_ADDRESS (dr
);
1374 /* In case the dataref is in an inner-loop of the loop that is being
1375 vectorized (LOOP), we use the base and misalignment information
1376 relative to the outer-loop (LOOP). This is ok only if the misalignment
1377 stays the same throughout the execution of the inner-loop, which is why
1378 we have to check that the stride of the dataref in the inner-loop evenly
1379 divides by the vector size. */
1380 if (nested_in_vect_loop_p (loop
, stmt
))
1382 tree step
= DR_STEP (dr
);
1383 HOST_WIDE_INT dr_step
= TREE_INT_CST_LOW (step
);
1385 if (dr_step
% UNITS_PER_SIMD_WORD
== 0)
1387 if (vect_print_dump_info (REPORT_ALIGNMENT
))
1388 fprintf (vect_dump
, "inner step divides the vector-size.");
1389 misalign
= STMT_VINFO_DR_INIT (stmt_info
);
1390 aligned_to
= STMT_VINFO_DR_ALIGNED_TO (stmt_info
);
1391 base_addr
= STMT_VINFO_DR_BASE_ADDRESS (stmt_info
);
1395 if (vect_print_dump_info (REPORT_ALIGNMENT
))
1396 fprintf (vect_dump
, "inner step doesn't divide the vector-size.");
1397 misalign
= NULL_TREE
;
1401 base
= build_fold_indirect_ref (base_addr
);
1402 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1403 alignment
= ssize_int (TYPE_ALIGN (vectype
)/BITS_PER_UNIT
);
1405 if ((aligned_to
&& tree_int_cst_compare (aligned_to
, alignment
) < 0)
1408 if (vect_print_dump_info (REPORT_ALIGNMENT
))
1410 fprintf (vect_dump
, "Unknown alignment for access: ");
1411 print_generic_expr (vect_dump
, base
, TDF_SLIM
);
1417 && tree_int_cst_compare (ssize_int (DECL_ALIGN_UNIT (base
)),
1419 || (TREE_CODE (base_addr
) == SSA_NAME
1420 && tree_int_cst_compare (ssize_int (TYPE_ALIGN_UNIT (TREE_TYPE (
1421 TREE_TYPE (base_addr
)))),
1423 base_aligned
= true;
1425 base_aligned
= false;
1429 /* Do not change the alignment of global variables if
1430 flag_section_anchors is enabled. */
1431 if (!vect_can_force_dr_alignment_p (base
, TYPE_ALIGN (vectype
))
1432 || (TREE_STATIC (base
) && flag_section_anchors
))
1434 if (vect_print_dump_info (REPORT_DETAILS
))
1436 fprintf (vect_dump
, "can't force alignment of ref: ");
1437 print_generic_expr (vect_dump
, ref
, TDF_SLIM
);
1442 /* Force the alignment of the decl.
1443 NOTE: This is the only change to the code we make during
1444 the analysis phase, before deciding to vectorize the loop. */
1445 if (vect_print_dump_info (REPORT_DETAILS
))
1446 fprintf (vect_dump
, "force alignment");
1447 DECL_ALIGN (base
) = TYPE_ALIGN (vectype
);
1448 DECL_USER_ALIGN (base
) = 1;
1451 /* At this point we assume that the base is aligned. */
1452 gcc_assert (base_aligned
1453 || (TREE_CODE (base
) == VAR_DECL
1454 && DECL_ALIGN (base
) >= TYPE_ALIGN (vectype
)));
1456 /* Modulo alignment. */
1457 misalign
= size_binop (TRUNC_MOD_EXPR
, misalign
, alignment
);
1459 if (!host_integerp (misalign
, 1))
1461 /* Negative or overflowed misalignment value. */
1462 if (vect_print_dump_info (REPORT_DETAILS
))
1463 fprintf (vect_dump
, "unexpected misalign value");
1467 SET_DR_MISALIGNMENT (dr
, TREE_INT_CST_LOW (misalign
));
1469 if (vect_print_dump_info (REPORT_DETAILS
))
1471 fprintf (vect_dump
, "misalign = %d bytes of ref ", DR_MISALIGNMENT (dr
));
1472 print_generic_expr (vect_dump
, ref
, TDF_SLIM
);
1479 /* Function vect_compute_data_refs_alignment
1481 Compute the misalignment of data references in the loop.
1482 Return FALSE if a data reference is found that cannot be vectorized. */
1485 vect_compute_data_refs_alignment (loop_vec_info loop_vinfo
)
1487 VEC (data_reference_p
, heap
) *datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
1488 struct data_reference
*dr
;
1491 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
1492 if (!vect_compute_data_ref_alignment (dr
))
1499 /* Function vect_update_misalignment_for_peel
1501 DR - the data reference whose misalignment is to be adjusted.
1502 DR_PEEL - the data reference whose misalignment is being made
1503 zero in the vector loop by the peel.
1504 NPEEL - the number of iterations in the peel loop if the misalignment
1505 of DR_PEEL is known at compile time. */
1508 vect_update_misalignment_for_peel (struct data_reference
*dr
,
1509 struct data_reference
*dr_peel
, int npeel
)
1512 VEC(dr_p
,heap
) *same_align_drs
;
1513 struct data_reference
*current_dr
;
1514 int dr_size
= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr
))));
1515 int dr_peel_size
= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr_peel
))));
1516 stmt_vec_info stmt_info
= vinfo_for_stmt (DR_STMT (dr
));
1517 stmt_vec_info peel_stmt_info
= vinfo_for_stmt (DR_STMT (dr_peel
));
1519 /* For interleaved data accesses the step in the loop must be multiplied by
1520 the size of the interleaving group. */
1521 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
))
1522 dr_size
*= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_DR (stmt_info
)));
1523 if (STMT_VINFO_STRIDED_ACCESS (peel_stmt_info
))
1524 dr_peel_size
*= DR_GROUP_SIZE (peel_stmt_info
);
1526 /* It can be assumed that the data refs with the same alignment as dr_peel
1527 are aligned in the vector loop. */
1529 = STMT_VINFO_SAME_ALIGN_REFS (vinfo_for_stmt (DR_STMT (dr_peel
)));
1530 for (i
= 0; VEC_iterate (dr_p
, same_align_drs
, i
, current_dr
); i
++)
1532 if (current_dr
!= dr
)
1534 gcc_assert (DR_MISALIGNMENT (dr
) / dr_size
==
1535 DR_MISALIGNMENT (dr_peel
) / dr_peel_size
);
1536 SET_DR_MISALIGNMENT (dr
, 0);
1540 if (known_alignment_for_access_p (dr
)
1541 && known_alignment_for_access_p (dr_peel
))
1543 int misal
= DR_MISALIGNMENT (dr
);
1544 misal
+= npeel
* dr_size
;
1545 misal
%= UNITS_PER_SIMD_WORD
;
1546 SET_DR_MISALIGNMENT (dr
, misal
);
1550 if (vect_print_dump_info (REPORT_DETAILS
))
1551 fprintf (vect_dump
, "Setting misalignment to -1.");
1552 SET_DR_MISALIGNMENT (dr
, -1);
1556 /* Function vect_verify_datarefs_alignment
1558 Return TRUE if all data references in the loop can be
1559 handled with respect to alignment. */
1562 vect_verify_datarefs_alignment (loop_vec_info loop_vinfo
)
1564 VEC (data_reference_p
, heap
) *datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
1565 struct data_reference
*dr
;
1566 enum dr_alignment_support supportable_dr_alignment
;
1569 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
1571 tree stmt
= DR_STMT (dr
);
1572 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1574 /* For interleaving, only the alignment of the first access matters. */
1575 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
)
1576 && DR_GROUP_FIRST_DR (stmt_info
) != stmt
)
1579 supportable_dr_alignment
= vect_supportable_dr_alignment (dr
);
1580 if (!supportable_dr_alignment
)
1582 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
1584 if (DR_IS_READ (dr
))
1586 "not vectorized: unsupported unaligned load.");
1589 "not vectorized: unsupported unaligned store.");
1593 if (supportable_dr_alignment
!= dr_aligned
1594 && vect_print_dump_info (REPORT_ALIGNMENT
))
1595 fprintf (vect_dump
, "Vectorizing an unaligned access.");
1601 /* Function vector_alignment_reachable_p
1603 Return true if vector alignment for DR is reachable by peeling
1604 a few loop iterations. Return false otherwise. */
1607 vector_alignment_reachable_p (struct data_reference
*dr
)
1609 tree stmt
= DR_STMT (dr
);
1610 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1611 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1613 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
))
1615 /* For interleaved access we peel only if number of iterations in
1616 the prolog loop ({VF - misalignment}), is a multiple of the
1617 number of the interleaved accesses. */
1618 int elem_size
, mis_in_elements
;
1619 int nelements
= TYPE_VECTOR_SUBPARTS (vectype
);
1621 /* FORNOW: handle only known alignment. */
1622 if (!known_alignment_for_access_p (dr
))
1625 elem_size
= UNITS_PER_SIMD_WORD
/ nelements
;
1626 mis_in_elements
= DR_MISALIGNMENT (dr
) / elem_size
;
1628 if ((nelements
- mis_in_elements
) % DR_GROUP_SIZE (stmt_info
))
1632 /* If misalignment is known at the compile time then allow peeling
1633 only if natural alignment is reachable through peeling. */
1634 if (known_alignment_for_access_p (dr
) && !aligned_access_p (dr
))
1636 HOST_WIDE_INT elmsize
=
1637 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
1638 if (vect_print_dump_info (REPORT_DETAILS
))
1640 fprintf (vect_dump
, "data size =" HOST_WIDE_INT_PRINT_DEC
, elmsize
);
1641 fprintf (vect_dump
, ". misalignment = %d. ", DR_MISALIGNMENT (dr
));
1643 if (DR_MISALIGNMENT (dr
) % elmsize
)
1645 if (vect_print_dump_info (REPORT_DETAILS
))
1646 fprintf (vect_dump
, "data size does not divide the misalignment.\n");
1651 if (!known_alignment_for_access_p (dr
))
1653 tree type
= (TREE_TYPE (DR_REF (dr
)));
1654 tree ba
= DR_BASE_OBJECT (dr
);
1655 bool is_packed
= false;
1658 is_packed
= contains_packed_reference (ba
);
1660 if (vect_print_dump_info (REPORT_DETAILS
))
1661 fprintf (vect_dump
, "Unknown misalignment, is_packed = %d",is_packed
);
1662 if (targetm
.vectorize
.vector_alignment_reachable (type
, is_packed
))
1671 /* Function vect_enhance_data_refs_alignment
1673 This pass will use loop versioning and loop peeling in order to enhance
1674 the alignment of data references in the loop.
1676 FOR NOW: we assume that whatever versioning/peeling takes place, only the
1677 original loop is to be vectorized; Any other loops that are created by
1678 the transformations performed in this pass - are not supposed to be
1679 vectorized. This restriction will be relaxed.
1681 This pass will require a cost model to guide it whether to apply peeling
1682 or versioning or a combination of the two. For example, the scheme that
1683 intel uses when given a loop with several memory accesses, is as follows:
1684 choose one memory access ('p') which alignment you want to force by doing
1685 peeling. Then, either (1) generate a loop in which 'p' is aligned and all
1686 other accesses are not necessarily aligned, or (2) use loop versioning to
1687 generate one loop in which all accesses are aligned, and another loop in
1688 which only 'p' is necessarily aligned.
1690 ("Automatic Intra-Register Vectorization for the Intel Architecture",
1691 Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International
1692 Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
1694 Devising a cost model is the most critical aspect of this work. It will
1695 guide us on which access to peel for, whether to use loop versioning, how
1696 many versions to create, etc. The cost model will probably consist of
1697 generic considerations as well as target specific considerations (on
1698 powerpc for example, misaligned stores are more painful than misaligned
1701 Here are the general steps involved in alignment enhancements:
1703 -- original loop, before alignment analysis:
1704 for (i=0; i<N; i++){
1705 x = q[i]; # DR_MISALIGNMENT(q) = unknown
1706 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1709 -- After vect_compute_data_refs_alignment:
1710 for (i=0; i<N; i++){
1711 x = q[i]; # DR_MISALIGNMENT(q) = 3
1712 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1715 -- Possibility 1: we do loop versioning:
1717 for (i=0; i<N; i++){ # loop 1A
1718 x = q[i]; # DR_MISALIGNMENT(q) = 3
1719 p[i] = y; # DR_MISALIGNMENT(p) = 0
1723 for (i=0; i<N; i++){ # loop 1B
1724 x = q[i]; # DR_MISALIGNMENT(q) = 3
1725 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1729 -- Possibility 2: we do loop peeling:
1730 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1734 for (i = 3; i < N; i++){ # loop 2A
1735 x = q[i]; # DR_MISALIGNMENT(q) = 0
1736 p[i] = y; # DR_MISALIGNMENT(p) = unknown
1739 -- Possibility 3: combination of loop peeling and versioning:
1740 for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
1745 for (i = 3; i<N; i++){ # loop 3A
1746 x = q[i]; # DR_MISALIGNMENT(q) = 0
1747 p[i] = y; # DR_MISALIGNMENT(p) = 0
1751 for (i = 3; i<N; i++){ # loop 3B
1752 x = q[i]; # DR_MISALIGNMENT(q) = 0
1753 p[i] = y; # DR_MISALIGNMENT(p) = unaligned
1757 These loops are later passed to loop_transform to be vectorized. The
1758 vectorizer will use the alignment information to guide the transformation
1759 (whether to generate regular loads/stores, or with special handling for
1763 vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo
)
1765 VEC (data_reference_p
, heap
) *datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
1766 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1767 enum dr_alignment_support supportable_dr_alignment
;
1768 struct data_reference
*dr0
= NULL
;
1769 struct data_reference
*dr
;
1771 bool do_peeling
= false;
1772 bool do_versioning
= false;
1775 stmt_vec_info stmt_info
;
1776 int vect_versioning_for_alias_required
;
1778 if (vect_print_dump_info (REPORT_DETAILS
))
1779 fprintf (vect_dump
, "=== vect_enhance_data_refs_alignment ===");
1781 /* While cost model enhancements are expected in the future, the high level
1782 view of the code at this time is as follows:
1784 A) If there is a misaligned write then see if peeling to align this write
1785 can make all data references satisfy vect_supportable_dr_alignment.
1786 If so, update data structures as needed and return true. Note that
1787 at this time vect_supportable_dr_alignment is known to return false
1788 for a misaligned write.
1790 B) If peeling wasn't possible and there is a data reference with an
1791 unknown misalignment that does not satisfy vect_supportable_dr_alignment
1792 then see if loop versioning checks can be used to make all data
1793 references satisfy vect_supportable_dr_alignment. If so, update
1794 data structures as needed and return true.
1796 C) If neither peeling nor versioning were successful then return false if
1797 any data reference does not satisfy vect_supportable_dr_alignment.
1799 D) Return true (all data references satisfy vect_supportable_dr_alignment).
1801 Note, Possibility 3 above (which is peeling and versioning together) is not
1802 being done at this time. */
1804 /* (1) Peeling to force alignment. */
1806 /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
1808 + How many accesses will become aligned due to the peeling
1809 - How many accesses will become unaligned due to the peeling,
1810 and the cost of misaligned accesses.
1811 - The cost of peeling (the extra runtime checks, the increase
1814 The scheme we use FORNOW: peel to force the alignment of the first
1815 misaligned store in the loop.
1816 Rationale: misaligned stores are not yet supported.
1818 TODO: Use a cost model. */
1820 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
1822 stmt
= DR_STMT (dr
);
1823 stmt_info
= vinfo_for_stmt (stmt
);
1825 /* For interleaving, only the alignment of the first access
1827 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
)
1828 && DR_GROUP_FIRST_DR (stmt_info
) != stmt
)
1831 if (!DR_IS_READ (dr
) && !aligned_access_p (dr
))
1833 do_peeling
= vector_alignment_reachable_p (dr
);
1836 if (!do_peeling
&& vect_print_dump_info (REPORT_DETAILS
))
1837 fprintf (vect_dump
, "vector alignment may not be reachable");
1842 vect_versioning_for_alias_required
=
1843 (VEC_length (ddr_p
, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
)) > 0);
1845 /* Temporarily, if versioning for alias is required, we disable peeling
1846 until we support peeling and versioning. Often peeling for alignment
1847 will require peeling for loop-bound, which in turn requires that we
1848 know how to adjust the loop ivs after the loop. */
1849 if (vect_versioning_for_alias_required
1850 || !vect_can_advance_ivs_p (loop_vinfo
)
1851 || !slpeel_can_duplicate_loop_p (loop
, single_exit (loop
)))
1858 tree stmt
= DR_STMT (dr0
);
1859 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1860 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1861 int nelements
= TYPE_VECTOR_SUBPARTS (vectype
);
1863 if (known_alignment_for_access_p (dr0
))
1865 /* Since it's known at compile time, compute the number of iterations
1866 in the peeled loop (the peeling factor) for use in updating
1867 DR_MISALIGNMENT values. The peeling factor is the vectorization
1868 factor minus the misalignment as an element count. */
1869 mis
= DR_MISALIGNMENT (dr0
);
1870 mis
/= GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (DR_REF (dr0
))));
1871 npeel
= nelements
- mis
;
1873 /* For interleaved data access every iteration accesses all the
1874 members of the group, therefore we divide the number of iterations
1875 by the group size. */
1876 stmt_info
= vinfo_for_stmt (DR_STMT (dr0
));
1877 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
))
1878 npeel
/= DR_GROUP_SIZE (stmt_info
);
1880 if (vect_print_dump_info (REPORT_DETAILS
))
1881 fprintf (vect_dump
, "Try peeling by %d", npeel
);
1884 /* Ensure that all data refs can be vectorized after the peel. */
1885 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
1887 int save_misalignment
;
1892 stmt
= DR_STMT (dr
);
1893 stmt_info
= vinfo_for_stmt (stmt
);
1894 /* For interleaving, only the alignment of the first access
1896 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
)
1897 && DR_GROUP_FIRST_DR (stmt_info
) != stmt
)
1900 save_misalignment
= DR_MISALIGNMENT (dr
);
1901 vect_update_misalignment_for_peel (dr
, dr0
, npeel
);
1902 supportable_dr_alignment
= vect_supportable_dr_alignment (dr
);
1903 SET_DR_MISALIGNMENT (dr
, save_misalignment
);
1905 if (!supportable_dr_alignment
)
1914 /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
1915 If the misalignment of DR_i is identical to that of dr0 then set
1916 DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
1917 dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
1918 by the peeling factor times the element size of DR_i (MOD the
1919 vectorization factor times the size). Otherwise, the
1920 misalignment of DR_i must be set to unknown. */
1921 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
1923 vect_update_misalignment_for_peel (dr
, dr0
, npeel
);
1925 LOOP_VINFO_UNALIGNED_DR (loop_vinfo
) = dr0
;
1926 LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo
) = DR_MISALIGNMENT (dr0
);
1927 SET_DR_MISALIGNMENT (dr0
, 0);
1928 if (vect_print_dump_info (REPORT_ALIGNMENT
))
1929 fprintf (vect_dump
, "Alignment of access forced using peeling.");
1931 if (vect_print_dump_info (REPORT_DETAILS
))
1932 fprintf (vect_dump
, "Peeling for alignment will be applied.");
1934 stat
= vect_verify_datarefs_alignment (loop_vinfo
);
1941 /* (2) Versioning to force alignment. */
1943 /* Try versioning if:
1944 1) flag_tree_vect_loop_version is TRUE
1945 2) optimize_size is FALSE
1946 3) there is at least one unsupported misaligned data ref with an unknown
1948 4) all misaligned data refs with a known misalignment are supported, and
1949 5) the number of runtime alignment checks is within reason. */
1952 flag_tree_vect_loop_version
1954 && (!loop
->inner
); /* FORNOW */
1958 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
1960 stmt
= DR_STMT (dr
);
1961 stmt_info
= vinfo_for_stmt (stmt
);
1963 /* For interleaving, only the alignment of the first access
1965 if (aligned_access_p (dr
)
1966 || (STMT_VINFO_STRIDED_ACCESS (stmt_info
)
1967 && DR_GROUP_FIRST_DR (stmt_info
) != stmt
))
1970 supportable_dr_alignment
= vect_supportable_dr_alignment (dr
);
1972 if (!supportable_dr_alignment
)
1978 if (known_alignment_for_access_p (dr
)
1979 || VEC_length (tree
,
1980 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
))
1981 >= (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIGNMENT_CHECKS
))
1983 do_versioning
= false;
1987 stmt
= DR_STMT (dr
);
1988 vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
1989 gcc_assert (vectype
);
1991 /* The rightmost bits of an aligned address must be zeros.
1992 Construct the mask needed for this test. For example,
1993 GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
1994 mask must be 15 = 0xf. */
1995 mask
= GET_MODE_SIZE (TYPE_MODE (vectype
)) - 1;
1997 /* FORNOW: use the same mask to test all potentially unaligned
1998 references in the loop. The vectorizer currently supports
1999 a single vector size, see the reference to
2000 GET_MODE_NUNITS (TYPE_MODE (vectype)) where the
2001 vectorization factor is computed. */
2002 gcc_assert (!LOOP_VINFO_PTR_MASK (loop_vinfo
)
2003 || LOOP_VINFO_PTR_MASK (loop_vinfo
) == mask
);
2004 LOOP_VINFO_PTR_MASK (loop_vinfo
) = mask
;
2005 VEC_safe_push (tree
, heap
,
2006 LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
),
2011 /* Versioning requires at least one misaligned data reference. */
2012 if (VEC_length (tree
, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
)) == 0)
2013 do_versioning
= false;
2014 else if (!do_versioning
)
2015 VEC_truncate (tree
, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
), 0);
2020 VEC(tree
,heap
) *may_misalign_stmts
2021 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo
);
2024 /* It can now be assumed that the data references in the statements
2025 in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
2026 of the loop being vectorized. */
2027 for (i
= 0; VEC_iterate (tree
, may_misalign_stmts
, i
, stmt
); i
++)
2029 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2030 dr
= STMT_VINFO_DATA_REF (stmt_info
);
2031 SET_DR_MISALIGNMENT (dr
, 0);
2032 if (vect_print_dump_info (REPORT_ALIGNMENT
))
2033 fprintf (vect_dump
, "Alignment of access forced using versioning.");
2036 if (vect_print_dump_info (REPORT_DETAILS
))
2037 fprintf (vect_dump
, "Versioning for alignment will be applied.");
2039 /* Peeling and versioning can't be done together at this time. */
2040 gcc_assert (! (do_peeling
&& do_versioning
));
2042 stat
= vect_verify_datarefs_alignment (loop_vinfo
);
2047 /* This point is reached if neither peeling nor versioning is being done. */
2048 gcc_assert (! (do_peeling
|| do_versioning
));
2050 stat
= vect_verify_datarefs_alignment (loop_vinfo
);
2055 /* Function vect_analyze_data_refs_alignment
2057 Analyze the alignment of the data-references in the loop.
2058 Return FALSE if a data reference is found that cannot be vectorized. */
2061 vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo
)
2063 if (vect_print_dump_info (REPORT_DETAILS
))
2064 fprintf (vect_dump
, "=== vect_analyze_data_refs_alignment ===");
2066 if (!vect_compute_data_refs_alignment (loop_vinfo
))
2068 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
2070 "not vectorized: can't calculate alignment for data ref.");
2078 /* Analyze groups of strided accesses: check that DR belongs to a group of
2079 strided accesses of legal size, step, etc. Detect gaps, single element
2080 interleaving, and other special cases. Set strided access info.
2081 Collect groups of strided stores for further use in SLP analysis. */
/* Validate one data-reference DR as (part of) a strided/interleaved access
   group rooted at DR_GROUP_FIRST_DR.  Walks the DR_GROUP_NEXT_DR chain of
   the first statement, checking equal STEP, no gaps for stores, power-of-2
   group size, and records DR_GROUP_SIZE / DR_GROUP_GAP.  Groups of strided
   stores that remain SLP-able are pushed onto LOOP_VINFO_STRIDED_STORES.
   NOTE(review): gaps in the embedded line numbering (e.g. 2094->2096,
   2101->2103) show that brace lines, parts of conditions and return
   statements were lost in extraction — verify this text against the
   upstream file before relying on it.  */
2084 vect_analyze_group_access (struct data_reference
*dr
)
2086 tree step
= DR_STEP (dr
);
2087 tree scalar_type
= TREE_TYPE (DR_REF (dr
));
2088 HOST_WIDE_INT type_size
= TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type
));
2089 tree stmt
= DR_STMT (dr
);
2090 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2091 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2092 HOST_WIDE_INT dr_step
= TREE_INT_CST_LOW (step
);
2093 HOST_WIDE_INT stride
;
2094 bool slp_impossible
= false;
2096 /* For interleaving, STRIDE is STEP counted in elements, i.e., the size of the
2097 interleaving group (including gaps). */
2098 stride
= dr_step
/ type_size
;
2100 /* Not consecutive access is possible only if it is a part of interleaving. */
2101 if (!DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt
)))
2103 /* Check if this DR is a part of interleaving, and is a single
2104 element of the group that is accessed in the loop. */
2106 /* Gaps are supported only for loads. STEP must be a multiple of the type
2107 size. The size of the group must be a power of 2. */
2109 && (dr_step
% type_size
) == 0
2111 && exact_log2 (stride
) != -1)
2113 DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt
)) = stmt
;
2114 DR_GROUP_SIZE (vinfo_for_stmt (stmt
)) = stride
;
2115 if (vect_print_dump_info (REPORT_DR_DETAILS
))
2117 fprintf (vect_dump
, "Detected single element interleaving %d ",
2118 DR_GROUP_SIZE (vinfo_for_stmt (stmt
)));
2119 print_generic_expr (vect_dump
, DR_REF (dr
), TDF_SLIM
);
2120 fprintf (vect_dump
, " step ");
2121 print_generic_expr (vect_dump
, step
, TDF_SLIM
);
2125 if (vect_print_dump_info (REPORT_DETAILS
))
2126 fprintf (vect_dump
, "not consecutive access");
2130 if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt
)) == stmt
)
2132 /* First stmt in the interleaving chain. Check the chain. */
2133 tree next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (stmt
));
2134 struct data_reference
*data_ref
= dr
;
2135 unsigned int count
= 1;
2137 tree prev_init
= DR_INIT (data_ref
);
2139 HOST_WIDE_INT diff
, count_in_bytes
;
2143 /* Skip same data-refs. In case that two or more stmts share data-ref
2144 (supported only for loads), we vectorize only the first stmt, and
2145 the rest get their vectorized loads from the first one. */
2146 if (!tree_int_cst_compare (DR_INIT (data_ref
),
2147 DR_INIT (STMT_VINFO_DATA_REF (
2148 vinfo_for_stmt (next
)))))
2150 if (!DR_IS_READ (data_ref
))
2152 if (vect_print_dump_info (REPORT_DETAILS
))
2153 fprintf (vect_dump
, "Two store stmts share the same dr.");
2157 /* Check that there are no load-store dependencies for these loads
2158 to prevent a case of load-store-load to the same location. */
2159 if (DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (next
))
2160 || DR_GROUP_READ_WRITE_DEPENDENCE (vinfo_for_stmt (prev
)))
2162 if (vect_print_dump_info (REPORT_DETAILS
))
2164 "READ_WRITE dependence in interleaving.");
2168 /* For load use the same data-ref load. */
2169 DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next
)) = prev
;
2172 next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (next
));
2177 /* Check that all the accesses have the same STEP. */
2178 next_step
= DR_STEP (STMT_VINFO_DATA_REF (vinfo_for_stmt (next
)));
2179 if (tree_int_cst_compare (step
, next_step
))
2181 if (vect_print_dump_info (REPORT_DETAILS
))
2182 fprintf (vect_dump
, "not consecutive access in interleaving");
2186 data_ref
= STMT_VINFO_DATA_REF (vinfo_for_stmt (next
));
2187 /* Check that the distance between two accesses is equal to the type
2188 size. Otherwise, we have gaps. */
2189 diff
= (TREE_INT_CST_LOW (DR_INIT (data_ref
))
2190 - TREE_INT_CST_LOW (prev_init
)) / type_size
;
2193 /* FORNOW: SLP of accesses with gaps is not supported. */
2194 slp_impossible
= true;
2195 if (!DR_IS_READ (data_ref
))
2197 if (vect_print_dump_info (REPORT_DETAILS
))
2198 fprintf (vect_dump
, "interleaved store with gaps");
2203 /* Store the gap from the previous member of the group. If there is no
2204 gap in the access, DR_GROUP_GAP is always 1. */
2205 DR_GROUP_GAP (vinfo_for_stmt (next
)) = diff
;
2207 prev_init
= DR_INIT (data_ref
);
2208 next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (next
));
2209 /* Count the number of data-refs in the chain. */
2213 /* COUNT is the number of accesses found, we multiply it by the size of
2214 the type to get COUNT_IN_BYTES. */
2215 count_in_bytes
= type_size
* count
;
2217 /* Check that the size of the interleaving is not greater than STEP. */
2218 if (dr_step
< count_in_bytes
)
2220 if (vect_print_dump_info (REPORT_DETAILS
))
2222 fprintf (vect_dump
, "interleaving size is greater than step for ");
2223 print_generic_expr (vect_dump
, DR_REF (dr
), TDF_SLIM
);
2228 /* Check that the size of the interleaving is equal to STEP for stores,
2229 i.e., that there are no gaps. */
2230 if (dr_step
!= count_in_bytes
)
2232 if (DR_IS_READ (dr
))
2234 slp_impossible
= true;
2235 /* There is a gap after the last load in the group. This gap is a
2236 difference between the stride and the number of elements. When
2237 there is no gap, this difference should be 0. */
2238 DR_GROUP_GAP (vinfo_for_stmt (stmt
)) = stride
- count
;
2242 if (vect_print_dump_info (REPORT_DETAILS
))
2243 fprintf (vect_dump
, "interleaved store with gaps");
2248 /* Check that STEP is a multiple of type size. */
2249 if ((dr_step
% type_size
) != 0)
2251 if (vect_print_dump_info (REPORT_DETAILS
))
2253 fprintf (vect_dump
, "step is not a multiple of type size: step ");
2254 print_generic_expr (vect_dump
, step
, TDF_SLIM
);
2255 fprintf (vect_dump
, " size ");
2256 print_generic_expr (vect_dump
, TYPE_SIZE_UNIT (scalar_type
),
2262 /* FORNOW: we handle only interleaving that is a power of 2.
2263 We don't fail here if it may be still possible to vectorize the
2264 group using SLP. If not, the size of the group will be checked in
2265 vect_analyze_operations, and the vectorization will fail. */
2266 if (exact_log2 (stride
) == -1)
2268 if (vect_print_dump_info (REPORT_DETAILS
))
2269 fprintf (vect_dump
, "interleaving is not a power of 2");
2274 DR_GROUP_SIZE (vinfo_for_stmt (stmt
)) = stride
;
2275 if (vect_print_dump_info (REPORT_DETAILS
))
2276 fprintf (vect_dump
, "Detected interleaving of size %d", (int)stride
);
2278 /* SLP: create an SLP data structure for every interleaving group of
2279 stores for further analysis in vect_analyze_slp. */
2280 if (!DR_IS_READ (dr
) && !slp_impossible
)
2281 VEC_safe_push (tree
, heap
, LOOP_VINFO_STRIDED_STORES (loop_vinfo
), stmt
);
2288 /* Analyze the access pattern of the data-reference DR.
2289 In case of non-consecutive accesses call vect_analyze_group_access() to
2290 analyze groups of strided accesses. */
/* Classify the access pattern of DR: consecutive (step == type size, marked
   as not interleaving), zero-step in an outer loop, or non-consecutive —
   in which case the group analysis is delegated to
   vect_analyze_group_access.  For data-refs nested in an inner loop of the
   loop being vectorized, the outer-loop step (STMT_VINFO_DR_STEP) is used
   instead of DR_STEP.
   NOTE(review): numbering gaps (e.g. 2301->2305, 2328->2336) indicate
   condition lines, braces and returns were dropped in extraction.  */
2293 vect_analyze_data_ref_access (struct data_reference
*dr
)
2295 tree step
= DR_STEP (dr
);
2296 tree scalar_type
= TREE_TYPE (DR_REF (dr
));
2297 tree stmt
= DR_STMT (dr
);
2298 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2299 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2300 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2301 HOST_WIDE_INT dr_step
= TREE_INT_CST_LOW (step
);
2305 if (vect_print_dump_info (REPORT_DETAILS
))
2306 fprintf (vect_dump
, "bad data-ref access");
2310 /* Don't allow invariant accesses. */
2314 if (nested_in_vect_loop_p (loop
, stmt
))
2316 /* Interleaved accesses are not yet supported within outer-loop
2317 vectorization for references in the inner-loop. */
2318 DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt
)) = NULL_TREE
;
2320 /* For the rest of the analysis we use the outer-loop step. */
2321 step
= STMT_VINFO_DR_STEP (stmt_info
);
2322 dr_step
= TREE_INT_CST_LOW (step
);
2326 if (vect_print_dump_info (REPORT_ALIGNMENT
))
2327 fprintf (vect_dump
, "zero step in outer loop.");
2328 if (DR_IS_READ (dr
))
2336 if (!tree_int_cst_compare (step
, TYPE_SIZE_UNIT (scalar_type
)))
2338 /* Mark that it is not interleaving. */
2339 DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt
)) = NULL_TREE
;
2343 if (nested_in_vect_loop_p (loop
, stmt
))
2345 if (vect_print_dump_info (REPORT_ALIGNMENT
))
2346 fprintf (vect_dump
, "strided access in outer loop.");
2350 /* Not consecutive access - check if it's a part of interleaving group. */
2351 return vect_analyze_group_access (dr
);
2355 /* Function vect_analyze_data_ref_accesses.
2357 Analyze the access pattern of all the data references in the loop.
2359 FORNOW: the only access pattern that is considered vectorizable is a
2360 simple step 1 (consecutive) access.
2362 FORNOW: handle only arrays and pointer accesses. */
/* Run vect_analyze_data_ref_access over every data-reference recorded in
   LOOP_VINFO_DATAREFS; on the first failure a "complicated access pattern"
   message is dumped (the return statements fall on lines lost in
   extraction — presumably false on failure, true otherwise; verify against
   upstream).  */
2365 vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo
)
2368 VEC (data_reference_p
, heap
) *datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
2369 struct data_reference
*dr
;
2371 if (vect_print_dump_info (REPORT_DETAILS
))
2372 fprintf (vect_dump
, "=== vect_analyze_data_ref_accesses ===");
2374 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
2375 if (!vect_analyze_data_ref_access (dr
))
2377 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
2378 fprintf (vect_dump
, "not vectorized: complicated access pattern.");
2385 /* Function vect_prune_runtime_alias_test_list.
2387 Prune a list of ddrs to be tested at run-time by versioning for alias.
2388 Return FALSE if the resulting list of ddrs is longer than allowed by
2389 PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
/* Deduplicate the list of ddrs scheduled for runtime alias checking:
   for each ddr, compare it (via vect_vfa_range_equal) against every ddr
   earlier in the list and remove it when an equal-range entry already
   exists.  If the pruned list still exceeds
   PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, versioning for alias is
   disabled by truncating the list to zero (the accompanying return lines
   were lost in extraction).  */
2392 vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo
)
2394 VEC (ddr_p
, heap
) * ddrs
=
2395 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
);
2398 if (vect_print_dump_info (REPORT_DETAILS
))
2399 fprintf (vect_dump
, "=== vect_prune_runtime_alias_test_list ===");
2401 for (i
= 0; i
< VEC_length (ddr_p
, ddrs
); )
2406 ddr_i
= VEC_index (ddr_p
, ddrs
, i
);
2409 for (j
= 0; j
< i
; j
++)
2411 ddr_p ddr_j
= VEC_index (ddr_p
, ddrs
, j
);
2413 if (vect_vfa_range_equal (ddr_i
, ddr_j
))
2415 if (vect_print_dump_info (REPORT_DR_DETAILS
))
2417 fprintf (vect_dump
, "found equal ranges ");
2418 print_generic_expr (vect_dump
, DR_REF (DDR_A (ddr_i
)), TDF_SLIM
);
2419 fprintf (vect_dump
, ", ");
2420 print_generic_expr (vect_dump
, DR_REF (DDR_B (ddr_i
)), TDF_SLIM
);
2421 fprintf (vect_dump
, " and ");
2422 print_generic_expr (vect_dump
, DR_REF (DDR_A (ddr_j
)), TDF_SLIM
);
2423 fprintf (vect_dump
, ", ");
2424 print_generic_expr (vect_dump
, DR_REF (DDR_B (ddr_j
)), TDF_SLIM
);
2433 VEC_ordered_remove (ddr_p
, ddrs
, i
);
2439 if (VEC_length (ddr_p
, ddrs
) >
2440 (unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS
))
2442 if (vect_print_dump_info (REPORT_DR_DETAILS
))
2445 "disable versioning for alias - max number of generated "
2446 "checks exceeded.");
2449 VEC_truncate (ddr_p
, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo
), 0);
2457 /* Recursively free the memory allocated for the SLP tree rooted at NODE. */
/* Post-order recursive free of the SLP tree rooted at NODE: free the left
   and right subtrees first, then the node's scalar-stmt vector, the
   vec-stmt vector if present, and (on a line lost in extraction,
   presumably) the node itself.  */
2460 vect_free_slp_tree (slp_tree node
)
2465 if (SLP_TREE_LEFT (node
))
2466 vect_free_slp_tree (SLP_TREE_LEFT (node
));
2468 if (SLP_TREE_RIGHT (node
))
2469 vect_free_slp_tree (SLP_TREE_RIGHT (node
));
2471 VEC_free (tree
, heap
, SLP_TREE_SCALAR_STMTS (node
));
2473 if (SLP_TREE_VEC_STMTS (node
))
2474 VEC_free (tree
, heap
, SLP_TREE_VEC_STMTS (node
));
2480 /* Get the defs for the RHS (collect them in DEF_STMTS0/1), check that they are
2481 of a legal type and that they match the defs of the first stmt of the SLP
2482 group (stored in FIRST_STMT_...). */
/* For each operand of RHS (1 for unary_op, 2 for binary_op), find its def
   stmt via vect_is_simple_use and collect the defs into *DEF_STMTS0 /
   *DEF_STMTS1.  For the first stmt of the SLP group the def types and def
   TREE_TYPEs are recorded through the FIRST_STMT_* out-parameters (and
   costs are modeled); for subsequent stmts the defs are checked to match
   those recorded values.  At most one constant operand is allowed per
   stmt (*FIRST_STMT_CONST_OPRND).
   NOTE(review): the switch framing around "Check the types of the
   definitions" and several return statements fall on lines lost in
   extraction.  */
2485 vect_get_and_check_slp_defs (loop_vec_info loop_vinfo
, slp_tree slp_node
,
2486 tree rhs
, VEC (tree
, heap
) **def_stmts0
,
2487 VEC (tree
, heap
) **def_stmts1
,
2488 enum vect_def_type
*first_stmt_dt0
,
2489 enum vect_def_type
*first_stmt_dt1
,
2490 tree
*first_stmt_def0_type
,
2491 tree
*first_stmt_def1_type
,
2492 tree
*first_stmt_const_oprnd
,
2493 int ncopies_for_cost
)
2496 enum operation_type op_type
= TREE_OPERAND_LENGTH (rhs
);
2497 unsigned int i
, number_of_oprnds
= op_type
;
2499 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
2500 stmt_vec_info stmt_info
=
2501 vinfo_for_stmt (VEC_index (tree
, SLP_TREE_SCALAR_STMTS (slp_node
), 0));
2505 number_of_oprnds
= 1;
2507 gcc_assert (op_type
== unary_op
|| op_type
== binary_op
);
2509 for (i
= 0; i
< number_of_oprnds
; i
++)
2512 oprnd
= TREE_OPERAND (rhs
, i
);
2516 if (!vect_is_simple_use (oprnd
, loop_vinfo
, &def_stmt
, &def
, &dt
[i
])
2517 || (!def_stmt
&& dt
[i
] != vect_constant_def
))
2519 if (vect_print_dump_info (REPORT_SLP
))
2521 fprintf (vect_dump
, "Build SLP failed: can't find def for ");
2522 print_generic_expr (vect_dump
, oprnd
, TDF_SLIM
);
2528 if (!*first_stmt_dt0
)
2530 /* op0 of the first stmt of the group - store its info. */
2531 *first_stmt_dt0
= dt
[i
];
2533 *first_stmt_def0_type
= TREE_TYPE (def
);
2535 *first_stmt_const_oprnd
= oprnd
;
2537 /* Analyze costs (for the first stmt of the group only). */
2539 /* Not memory operation (we don't call this function for loads). */
2540 vect_model_simple_cost (stmt_info
, ncopies_for_cost
, dt
, slp_node
);
2543 vect_model_store_cost (stmt_info
, ncopies_for_cost
, dt
[0], slp_node
);
2548 if (!*first_stmt_dt1
&& i
== 1)
2550 /* op1 of the first stmt of the group - store its info. */
2551 *first_stmt_dt1
= dt
[i
];
2553 *first_stmt_def1_type
= TREE_TYPE (def
);
2556 /* We assume that the stmt contains only one constant
2557 operand. We fail otherwise, to be on the safe side. */
2558 if (*first_stmt_const_oprnd
)
2560 if (vect_print_dump_info (REPORT_SLP
))
2561 fprintf (vect_dump
, "Build SLP failed: two constant "
2565 *first_stmt_const_oprnd
= oprnd
;
2570 /* Not first stmt of the group, check that the def-stmt/s match
2571 the def-stmt/s of the first stmt. */
2573 && (*first_stmt_dt0
!= dt
[i
]
2574 || (*first_stmt_def0_type
&& def
2575 && *first_stmt_def0_type
!= TREE_TYPE (def
))))
2577 && (*first_stmt_dt1
!= dt
[i
]
2578 || (*first_stmt_def1_type
&& def
2579 && *first_stmt_def1_type
!= TREE_TYPE (def
))))
2581 && TREE_TYPE (*first_stmt_const_oprnd
)
2582 != TREE_TYPE (oprnd
)))
2584 if (vect_print_dump_info (REPORT_SLP
))
2585 fprintf (vect_dump
, "Build SLP failed: different types ");
2592 /* Check the types of the definitions. */
2595 case vect_constant_def
:
2596 case vect_invariant_def
:
2601 VEC_safe_push (tree
, heap
, *def_stmts0
, def_stmt
);
2603 VEC_safe_push (tree
, heap
, *def_stmts1
, def_stmt
);
2607 /* FORNOW: Not supported. */
2608 if (vect_print_dump_info (REPORT_SLP
))
2610 fprintf (vect_dump
, "Build SLP failed: illegal type of def ");
2611 print_generic_expr (vect_dump
, def
, TDF_SLIM
);
2622 /* Recursively build an SLP tree starting from NODE.
2623 Fail (and return FALSE) if def-stmts are not isomorphic, require data
2624 permutation or are of unsupported types of operation. Otherwise, return
2626 SLP_IMPOSSIBLE is TRUE if it is impossible to SLP in the loop, for example
2627 in the case of multiple types for now. */
/* Recursively build the SLP tree rooted at *NODE.  Every scalar stmt in the
   node must be a GIMPLE_MODIFY_STMT with the same operation code as the
   first stmt (and, for vector/scalar shifts, the identical shift operand);
   strided stores are checked via vect_get_and_check_slp_defs, strided
   loads must be the complete, gap-free, in-order interleaving chain and
   terminate the recursion.  Accumulates node costs into *INSIDE_COST /
   *OUTSIDE_COST and recurses into left/right child nodes for operands
   defined inside the loop (vect_loop_def).
   NOTE(review): numbering gaps show that many framing lines (braces,
   returns, some `if` headers such as the ncopies != 1 check before 2692)
   were lost in extraction.  */
2630 vect_build_slp_tree (loop_vec_info loop_vinfo
, slp_tree
*node
,
2631 unsigned int group_size
, bool *slp_impossible
,
2632 int *inside_cost
, int *outside_cost
,
2633 int ncopies_for_cost
)
2635 VEC (tree
, heap
) *def_stmts0
= VEC_alloc (tree
, heap
, group_size
);
2636 VEC (tree
, heap
) *def_stmts1
= VEC_alloc (tree
, heap
, group_size
);
2638 VEC (tree
, heap
) *stmts
= SLP_TREE_SCALAR_STMTS (*node
);
2639 tree stmt
= VEC_index (tree
, stmts
, 0);
2640 enum vect_def_type first_stmt_dt0
= 0, first_stmt_dt1
= 0;
2641 enum tree_code first_stmt_code
= 0;
2642 tree first_stmt_def1_type
= NULL_TREE
, first_stmt_def0_type
= NULL_TREE
;
2643 tree lhs
, rhs
, prev_stmt
= NULL_TREE
;
2644 bool stop_recursion
= false, need_same_oprnds
= false;
2645 tree vectype
, scalar_type
, first_op1
= NULL_TREE
;
2646 unsigned int vectorization_factor
= 0, ncopies
;
2649 enum machine_mode optab_op2_mode
;
2650 enum machine_mode vec_mode
;
2651 tree first_stmt_const_oprnd
= NULL_TREE
;
2652 struct data_reference
*first_dr
;
2654 /* For every stmt in NODE find its def stmt/s. */
2655 for (i
= 0; VEC_iterate (tree
, stmts
, i
, stmt
); i
++)
2657 if (vect_print_dump_info (REPORT_SLP
))
2659 fprintf (vect_dump
, "Build SLP for ");
2660 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2663 if (TREE_CODE (stmt
) != GIMPLE_MODIFY_STMT
)
2665 if (vect_print_dump_info (REPORT_SLP
))
2667 fprintf (vect_dump
, "Build SLP failed: not MODIFY_STMT ");
2668 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2674 scalar_type
= TREE_TYPE (GIMPLE_STMT_OPERAND (stmt
, 0));
2675 vectype
= get_vectype_for_scalar_type (scalar_type
);
2678 if (vect_print_dump_info (REPORT_SLP
))
2680 fprintf (vect_dump
, "Build SLP failed: unsupported data-type ");
2681 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
2686 gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo
));
2687 vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
2688 ncopies
= vectorization_factor
/ TYPE_VECTOR_SUBPARTS (vectype
);
2692 if (vect_print_dump_info (REPORT_SLP
))
2693 fprintf (vect_dump
, "SLP failed - multiple types ");
2695 *slp_impossible
= true;
2699 lhs
= GIMPLE_STMT_OPERAND (stmt
, 0);
2700 rhs
= GIMPLE_STMT_OPERAND (stmt
, 1);
2702 /* Check the operation. */
2705 first_stmt_code
= TREE_CODE (rhs
);
2707 /* Shift arguments should be equal in all the packed stmts for a
2708 vector shift with scalar shift operand. */
2709 if (TREE_CODE (rhs
) == LSHIFT_EXPR
|| TREE_CODE (rhs
) == RSHIFT_EXPR
2710 || TREE_CODE (rhs
) == LROTATE_EXPR
2711 || TREE_CODE (rhs
) == RROTATE_EXPR
)
2713 vec_mode
= TYPE_MODE (vectype
);
2715 /* First see if we have a vector/vector shift. */
2716 optab
= optab_for_tree_code (TREE_CODE (rhs
), vectype
,
2720 || (optab
->handlers
[(int) vec_mode
].insn_code
2721 == CODE_FOR_nothing
))
2723 /* No vector/vector shift, try for a vector/scalar shift. */
2724 optab
= optab_for_tree_code (TREE_CODE (rhs
), vectype
,
2729 if (vect_print_dump_info (REPORT_SLP
))
2730 fprintf (vect_dump
, "Build SLP failed: no optab.");
2733 icode
= (int) optab
->handlers
[(int) vec_mode
].insn_code
;
2734 if (icode
== CODE_FOR_nothing
)
2736 if (vect_print_dump_info (REPORT_SLP
))
2738 "Build SLP failed: op not supported by target.");
2741 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
2742 if (!VECTOR_MODE_P (optab_op2_mode
))
2744 need_same_oprnds
= true;
2745 first_op1
= TREE_OPERAND (rhs
, 1);
2752 if (first_stmt_code
!= TREE_CODE (rhs
))
2754 if (vect_print_dump_info (REPORT_SLP
))
2757 "Build SLP failed: different operation in stmt ");
2758 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2764 if (need_same_oprnds
2765 && !operand_equal_p (first_op1
, TREE_OPERAND (rhs
, 1), 0))
2767 if (vect_print_dump_info (REPORT_SLP
))
2770 "Build SLP failed: different shift arguments in ");
2771 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2778 /* Strided store or load. */
2779 if (STMT_VINFO_STRIDED_ACCESS (vinfo_for_stmt (stmt
)))
2781 if (REFERENCE_CLASS_P (lhs
))
2784 if (!vect_get_and_check_slp_defs (loop_vinfo
, *node
, rhs
,
2785 &def_stmts0
, &def_stmts1
,
2788 &first_stmt_def0_type
,
2789 &first_stmt_def1_type
,
2790 &first_stmt_const_oprnd
,
2799 /* First stmt of the SLP group should be the first load of
2800 the interleaving loop if data permutation is not allowed.
2801 Check that there is no gap between the loads. */
2802 if (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt
)) != stmt
2803 || DR_GROUP_GAP (vinfo_for_stmt (stmt
)) != 0)
2805 /* FORNOW: data permutations and gaps in loads are not
2807 if (vect_print_dump_info (REPORT_SLP
))
2809 fprintf (vect_dump
, "Build SLP failed: strided "
2810 " loads need permutation or have gaps ");
2811 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2817 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
));
2818 if (vect_supportable_dr_alignment (first_dr
)
2819 == dr_unaligned_unsupported
)
2821 if (vect_print_dump_info (REPORT_SLP
))
2823 fprintf (vect_dump
, "Build SLP failed: unsupported "
2824 " unaligned load ");
2825 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2831 /* Analyze costs (for the first stmt in the group). */
2832 vect_model_load_cost (vinfo_for_stmt (stmt
),
2833 ncopies_for_cost
, *node
);
2837 /* Check that we have consecutive loads from interleaving
2838 chain and that there is no gap between the loads. */
2839 if (DR_GROUP_NEXT_DR (vinfo_for_stmt (prev_stmt
)) != stmt
2840 || DR_GROUP_GAP (vinfo_for_stmt (stmt
)) != 1)
2842 /* FORNOW: data permutations and gaps in loads are not
2844 if (vect_print_dump_info (REPORT_SLP
))
2846 fprintf (vect_dump
, "Build SLP failed: strided "
2847 " loads need permutation or have gaps ");
2848 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2856 /* We stop the tree when we reach a group of loads. */
2857 stop_recursion
= true;
2860 } /* Strided access. */
2863 if (REFERENCE_CLASS_P (rhs
))
2865 /* Not strided load. */
2866 if (vect_print_dump_info (REPORT_SLP
))
2868 fprintf (vect_dump
, "Build SLP failed: not strided load ");
2869 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2872 /* FORNOW: Non-strided loads are not supported. */
2876 /* Not memory operation. */
2877 if (!BINARY_CLASS_P (rhs
) && !UNARY_CLASS_P (rhs
))
2879 if (vect_print_dump_info (REPORT_SLP
))
2881 fprintf (vect_dump
, "Build SLP failed: operation");
2882 fprintf (vect_dump
, " unsupported ");
2883 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2889 /* Find the def-stmts. */
2890 if (!vect_get_and_check_slp_defs (loop_vinfo
, *node
, rhs
, &def_stmts0
,
2891 &def_stmts1
, &first_stmt_dt0
,
2893 &first_stmt_def0_type
,
2894 &first_stmt_def1_type
,
2895 &first_stmt_const_oprnd
,
2901 /* Add the costs of the node to the overall instance costs. */
2902 *inside_cost
+= SLP_TREE_INSIDE_OF_LOOP_COST (*node
);
2903 *outside_cost
+= SLP_TREE_OUTSIDE_OF_LOOP_COST (*node
);
2905 /* Strided loads were reached - stop the recursion. */
2909 /* Create SLP_TREE nodes for the definition node/s. */
2910 if (first_stmt_dt0
== vect_loop_def
)
2912 slp_tree left_node
= XNEW (struct _slp_tree
);
2913 SLP_TREE_SCALAR_STMTS (left_node
) = def_stmts0
;
2914 SLP_TREE_VEC_STMTS (left_node
) = NULL
;
2915 SLP_TREE_LEFT (left_node
) = NULL
;
2916 SLP_TREE_RIGHT (left_node
) = NULL
;
2917 SLP_TREE_OUTSIDE_OF_LOOP_COST (left_node
) = 0;
2918 SLP_TREE_INSIDE_OF_LOOP_COST (left_node
) = 0;
2919 if (!vect_build_slp_tree (loop_vinfo
, &left_node
, group_size
,
2920 slp_impossible
, inside_cost
, outside_cost
,
2924 SLP_TREE_LEFT (*node
) = left_node
;
2927 if (first_stmt_dt1
== vect_loop_def
)
2929 slp_tree right_node
= XNEW (struct _slp_tree
);
2930 SLP_TREE_SCALAR_STMTS (right_node
) = def_stmts1
;
2931 SLP_TREE_VEC_STMTS (right_node
) = NULL
;
2932 SLP_TREE_LEFT (right_node
) = NULL
;
2933 SLP_TREE_RIGHT (right_node
) = NULL
;
2934 SLP_TREE_OUTSIDE_OF_LOOP_COST (right_node
) = 0;
2935 SLP_TREE_INSIDE_OF_LOOP_COST (right_node
) = 0;
2936 if (!vect_build_slp_tree (loop_vinfo
, &right_node
, group_size
,
2937 slp_impossible
, inside_cost
, outside_cost
,
2941 SLP_TREE_RIGHT (*node
) = right_node
;
/* Dump the SLP tree rooted at NODE to vect_dump: print each scalar stmt
   of the node, then recurse into the left and right subtrees (the NULL
   check guarding the recursion sits on a line lost in extraction).  */
2949 vect_print_slp_tree (slp_tree node
)
2957 fprintf (vect_dump
, "node ");
2958 for (i
= 0; VEC_iterate (tree
, SLP_TREE_SCALAR_STMTS (node
), i
, stmt
); i
++)
2960 fprintf (vect_dump
, "\n\tstmt %d ", i
);
2961 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
2963 fprintf (vect_dump
, "\n");
2965 vect_print_slp_tree (SLP_TREE_LEFT (node
));
2966 vect_print_slp_tree (SLP_TREE_RIGHT (node
));
2970 /* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
2971 If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
2972 J). Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
2973 stmts in NODE are to be marked. */
/* Set STMT_SLP_TYPE to MARK for the stmts of NODE and recurse into both
   subtrees.  When J >= 0 only the stmt at index J is marked (the hybrid
   case); J < 0 marks every stmt (pure_slp).  */
2976 vect_mark_slp_stmts (slp_tree node
, enum slp_vect_type mark
, int j
)
2984 for (i
= 0; VEC_iterate (tree
, SLP_TREE_SCALAR_STMTS (node
), i
, stmt
); i
++)
2985 if (j
< 0 || i
== j
)
2986 STMT_SLP_TYPE (vinfo_for_stmt (stmt
)) = mark
;
2988 vect_mark_slp_stmts (SLP_TREE_LEFT (node
), mark
, j
);
2989 vect_mark_slp_stmts (SLP_TREE_RIGHT (node
), mark
, j
);
2993 /* Analyze an SLP instance starting from a group of strided stores. Call
2994 vect_build_slp_tree to build a tree of packed stmts if possible.
2995 Return FALSE if it's impossible to SLP any stmt in the loop. */
/* Try to build an SLP instance from the group of strided stores whose
   first stmt is STMT: collect the DR_GROUP chain into a root slp_tree
   node, compute the unrolling factor (lcm(nunits, group_size) /
   group_size) and cost copies, and call vect_build_slp_tree.  On success
   the new instance (tree, group size, unrolling factor, costs) is pushed
   onto LOOP_VINFO_SLP_INSTANCES; on failure the tree is freed.
   NOTE(review): several framing lines (loop header of the collection
   loop, return statements) were lost in extraction.  */
2998 vect_analyze_slp_instance (loop_vec_info loop_vinfo
, tree stmt
)
3000 slp_instance new_instance
;
3001 slp_tree node
= XNEW (struct _slp_tree
);
3002 unsigned int group_size
= DR_GROUP_SIZE (vinfo_for_stmt (stmt
));
3003 unsigned int unrolling_factor
= 1, nunits
;
3004 tree vectype
, scalar_type
, next
;
3005 unsigned int vectorization_factor
= 0, ncopies
;
3006 bool slp_impossible
= false;
3007 int inside_cost
= 0, outside_cost
= 0, ncopies_for_cost
;
3009 /* FORNOW: multiple types are not supported. */
3010 scalar_type
= TREE_TYPE (DR_REF (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
))));
3011 vectype
= get_vectype_for_scalar_type (scalar_type
);
3014 if (vect_print_dump_info (REPORT_SLP
))
3016 fprintf (vect_dump
, "Build SLP failed: unsupported data-type ");
3017 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
3022 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
3023 vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
3024 ncopies
= vectorization_factor
/ nunits
;
3027 if (vect_print_dump_info (REPORT_SLP
))
3028 fprintf (vect_dump
, "SLP failed - multiple types ");
3033 /* Create a node (a root of the SLP tree) for the packed strided stores. */
3034 SLP_TREE_SCALAR_STMTS (node
) = VEC_alloc (tree
, heap
, group_size
);
3036 /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
3039 VEC_safe_push (tree
, heap
, SLP_TREE_SCALAR_STMTS (node
), next
);
3040 next
= DR_GROUP_NEXT_DR (vinfo_for_stmt (next
));
3043 SLP_TREE_VEC_STMTS (node
) = NULL
;
3044 SLP_TREE_NUMBER_OF_VEC_STMTS (node
) = 0;
3045 SLP_TREE_LEFT (node
) = NULL
;
3046 SLP_TREE_RIGHT (node
) = NULL
;
3047 SLP_TREE_OUTSIDE_OF_LOOP_COST (node
) = 0;
3048 SLP_TREE_INSIDE_OF_LOOP_COST (node
) = 0;
3050 /* Calculate the unrolling factor. */
3051 unrolling_factor
= least_common_multiple (nunits
, group_size
) / group_size
;
3053 /* Calculate the number of vector stmts to create based on the unrolling
3054 factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
3055 GROUP_SIZE / NUNITS otherwise). */
3056 ncopies_for_cost
= unrolling_factor
* group_size
/ nunits
;
3058 /* Build the tree for the SLP instance. */
3059 if (vect_build_slp_tree (loop_vinfo
, &node
, group_size
, &slp_impossible
,
3060 &inside_cost
, &outside_cost
, ncopies_for_cost
))
3062 /* Create a new SLP instance. */
3063 new_instance
= XNEW (struct _slp_instance
);
3064 SLP_INSTANCE_TREE (new_instance
) = node
;
3065 SLP_INSTANCE_GROUP_SIZE (new_instance
) = group_size
;
3066 SLP_INSTANCE_UNROLLING_FACTOR (new_instance
) = unrolling_factor
;
3067 SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (new_instance
) = outside_cost
;
3068 SLP_INSTANCE_INSIDE_OF_LOOP_COST (new_instance
) = inside_cost
;
3069 VEC_safe_push (slp_instance
, heap
, LOOP_VINFO_SLP_INSTANCES (loop_vinfo
),
3071 if (vect_print_dump_info (REPORT_SLP
))
3072 vect_print_slp_tree (node
);
3077 /* Failed to SLP. */
3078 /* Free the allocated memory. */
3079 vect_free_slp_tree (node
);
3084 /* SLP failed for this instance, but it is still possible to SLP other stmts
3090 /* Check if there are stmts in the loop that can be vectorized using SLP. Build SLP
3091 trees of packed scalar stmts if SLP is possible. */
/* Try to build an SLP instance for every group of strided stores recorded
   in LOOP_VINFO_STRIDED_STORES.  On the first group that fails, a "SLP
   failed." message is dumped — no instance can be SLPed in the loop (the
   return lines fall outside the extracted text).  */
3094 vect_analyze_slp (loop_vec_info loop_vinfo
)
3097 VEC (tree
, heap
) *strided_stores
= LOOP_VINFO_STRIDED_STORES (loop_vinfo
);
3100 if (vect_print_dump_info (REPORT_SLP
))
3101 fprintf (vect_dump
, "=== vect_analyze_slp ===");
3103 for (i
= 0; VEC_iterate (tree
, strided_stores
, i
, store
); i
++)
3104 if (!vect_analyze_slp_instance (loop_vinfo
, store
))
3106 /* SLP failed. No instance can be SLPed in the loop. */
3107 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3108 fprintf (vect_dump
, "SLP failed.");
3117 /* For each possible SLP instance decide whether to SLP it and calculate overall
3118 unrolling factor needed to SLP the loop. */
/* Decide, per SLP instance, whether to SLP it (FORNOW: always), mark each
   instance's stmts pure_slp, and record in
   LOOP_VINFO_SLP_UNROLLING_FACTOR the maximum unrolling factor over all
   instances.  DECIDED_TO_SLP counts the chosen instances (its increment
   falls on a line lost in extraction).  */
3121 vect_make_slp_decision (loop_vec_info loop_vinfo
)
3123 unsigned int i
, unrolling_factor
= 1;
3124 VEC (slp_instance
, heap
) *slp_instances
= LOOP_VINFO_SLP_INSTANCES (loop_vinfo
);
3125 slp_instance instance
;
3126 int decided_to_slp
= 0;
3128 if (vect_print_dump_info (REPORT_SLP
))
3129 fprintf (vect_dump
, "=== vect_make_slp_decision ===");
3131 for (i
= 0; VEC_iterate (slp_instance
, slp_instances
, i
, instance
); i
++)
3133 /* FORNOW: SLP if you can. */
3134 if (unrolling_factor
< SLP_INSTANCE_UNROLLING_FACTOR (instance
))
3135 unrolling_factor
= SLP_INSTANCE_UNROLLING_FACTOR (instance
);
3137 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
3138 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
3139 loop-based vectorization. Such stmts will be marked as HYBRID. */
3140 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance
), pure_slp
, -1);
3144 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo
) = unrolling_factor
;
3146 if (decided_to_slp
&& vect_print_dump_info (REPORT_SLP
))
3147 fprintf (vect_dump
, "Decided to SLP %d instances. Unrolling factor %d",
3148 decided_to_slp
, unrolling_factor
);
3152 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
3153 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
/* Walk the SLP tree rooted at NODE; any pure-SLP stmt whose SSA_NAME
   result has an immediate use in a stmt outside SLP (a use stmt with a
   vinfo but no STMT_SLP_TYPE) is re-marked hybrid via
   vect_mark_slp_stmts, then both subtrees are processed.  */
3156 vect_detect_hybrid_slp_stmts (slp_tree node
)
3160 imm_use_iterator imm_iter
;
3166 for (i
= 0; VEC_iterate (tree
, SLP_TREE_SCALAR_STMTS (node
), i
, stmt
); i
++)
3167 if (PURE_SLP_STMT (vinfo_for_stmt (stmt
))
3168 && TREE_CODE (GIMPLE_STMT_OPERAND (stmt
, 0)) == SSA_NAME
)
3169 FOR_EACH_IMM_USE_STMT (use_stmt
, imm_iter
, GIMPLE_STMT_OPERAND (stmt
, 0))
3170 if (vinfo_for_stmt (use_stmt
)
3171 && !STMT_SLP_TYPE (vinfo_for_stmt (use_stmt
)))
3172 vect_mark_slp_stmts (node
, hybrid
, i
);
3174 vect_detect_hybrid_slp_stmts (SLP_TREE_LEFT (node
));
3175 vect_detect_hybrid_slp_stmts (SLP_TREE_RIGHT (node
));
3179 /* Find stmts that must be both vectorized and SLPed. */
/* Run vect_detect_hybrid_slp_stmts over the tree of every SLP instance of
   the loop, finding stmts that must be both vectorized and SLPed.  */
3182 vect_detect_hybrid_slp (loop_vec_info loop_vinfo
)
3185 VEC (slp_instance
, heap
) *slp_instances
= LOOP_VINFO_SLP_INSTANCES (loop_vinfo
);
3186 slp_instance instance
;
3188 if (vect_print_dump_info (REPORT_SLP
))
3189 fprintf (vect_dump
, "=== vect_detect_hybrid_slp ===");
3191 for (i
= 0; VEC_iterate (slp_instance
, slp_instances
, i
, instance
); i
++)
3192 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance
));
3196 /* Function vect_analyze_data_refs.
3198 Find all the data references in the loop.
3200 The general structure of the analysis of data refs in the vectorizer is as
3202 1- vect_analyze_data_refs(loop): call compute_data_dependences_for_loop to
3203 find and analyze all data-refs in the loop and their dependences.
3204 2- vect_analyze_dependences(): apply dependence testing using ddrs.
3205 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
3206 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
3211 vect_analyze_data_refs (loop_vec_info loop_vinfo
)
3213 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3215 VEC (data_reference_p
, heap
) *datarefs
;
3216 struct data_reference
*dr
;
3219 if (vect_print_dump_info (REPORT_DETAILS
))
3220 fprintf (vect_dump
, "=== vect_analyze_data_refs ===\n");
3222 compute_data_dependences_for_loop (loop
, true,
3223 &LOOP_VINFO_DATAREFS (loop_vinfo
),
3224 &LOOP_VINFO_DDRS (loop_vinfo
));
3226 /* Go through the data-refs, check that the analysis succeeded. Update pointer
3227 from stmt_vec_info struct to DR and vectype. */
3228 datarefs
= LOOP_VINFO_DATAREFS (loop_vinfo
);
3230 for (i
= 0; VEC_iterate (data_reference_p
, datarefs
, i
, dr
); i
++)
3233 stmt_vec_info stmt_info
;
3235 tree base
, offset
, init
;
3237 if (!dr
|| !DR_REF (dr
))
3239 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3240 fprintf (vect_dump
, "not vectorized: unhandled data-ref ");
3244 stmt
= DR_STMT (dr
);
3245 stmt_info
= vinfo_for_stmt (stmt
);
3247 /* Check that analysis of the data-ref succeeded. */
3248 if (!DR_BASE_ADDRESS (dr
) || !DR_OFFSET (dr
) || !DR_INIT (dr
)
3251 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3253 fprintf (vect_dump
, "not vectorized: data ref analysis failed ");
3254 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
3259 if (TREE_CODE (DR_BASE_ADDRESS (dr
)) == INTEGER_CST
)
3261 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3262 fprintf (vect_dump
, "not vectorized: base addr of dr is a "
3267 if (!DR_SYMBOL_TAG (dr
))
3269 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3271 fprintf (vect_dump
, "not vectorized: no memory tag for ");
3272 print_generic_expr (vect_dump
, DR_REF (dr
), TDF_SLIM
);
3277 base
= unshare_expr (DR_BASE_ADDRESS (dr
));
3278 offset
= unshare_expr (DR_OFFSET (dr
));
3279 init
= unshare_expr (DR_INIT (dr
));
3281 /* Update DR field in stmt_vec_info struct. */
3282 bb
= bb_for_stmt (stmt
);
3284 /* If the dataref is in an inner-loop of the loop that is considered for
3285 for vectorization, we also want to analyze the access relative to
3286 the outer-loop (DR contains information only relative to the
3287 inner-most enclosing loop). We do that by building a reference to the
3288 first location accessed by the inner-loop, and analyze it relative to
3290 if (nested_in_vect_loop_p (loop
, stmt
))
3292 tree outer_step
, outer_base
, outer_init
;
3293 HOST_WIDE_INT pbitsize
, pbitpos
;
3295 enum machine_mode pmode
;
3296 int punsignedp
, pvolatilep
;
3297 affine_iv base_iv
, offset_iv
;
3300 /* Build a reference to the first location accessed by the
3301 inner-loop: *(BASE+INIT). (The first location is actually
3302 BASE+INIT+OFFSET, but we add OFFSET separately later). */
3303 tree inner_base
= build_fold_indirect_ref
3304 (fold_build2 (POINTER_PLUS_EXPR
,
3305 TREE_TYPE (base
), base
,
3306 fold_convert (sizetype
, init
)));
3308 if (vect_print_dump_info (REPORT_DETAILS
))
3310 fprintf (dump_file
, "analyze in outer-loop: ");
3311 print_generic_expr (dump_file
, inner_base
, TDF_SLIM
);
3314 outer_base
= get_inner_reference (inner_base
, &pbitsize
, &pbitpos
,
3315 &poffset
, &pmode
, &punsignedp
, &pvolatilep
, false);
3316 gcc_assert (outer_base
!= NULL_TREE
);
3318 if (pbitpos
% BITS_PER_UNIT
!= 0)
3320 if (vect_print_dump_info (REPORT_DETAILS
))
3321 fprintf (dump_file
, "failed: bit offset alignment.\n");
3325 outer_base
= build_fold_addr_expr (outer_base
);
3326 if (!simple_iv (loop
, stmt
, outer_base
, &base_iv
, false))
3328 if (vect_print_dump_info (REPORT_DETAILS
))
3329 fprintf (dump_file
, "failed: evolution of base is not affine.\n");
3336 poffset
= fold_build2 (PLUS_EXPR
, TREE_TYPE (offset
), offset
, poffset
);
3343 offset_iv
.base
= ssize_int (0);
3344 offset_iv
.step
= ssize_int (0);
3346 else if (!simple_iv (loop
, stmt
, poffset
, &offset_iv
, false))
3348 if (vect_print_dump_info (REPORT_DETAILS
))
3349 fprintf (dump_file
, "evolution of offset is not affine.\n");
3353 outer_init
= ssize_int (pbitpos
/ BITS_PER_UNIT
);
3354 split_constant_offset (base_iv
.base
, &base_iv
.base
, &dinit
);
3355 outer_init
= size_binop (PLUS_EXPR
, outer_init
, dinit
);
3356 split_constant_offset (offset_iv
.base
, &offset_iv
.base
, &dinit
);
3357 outer_init
= size_binop (PLUS_EXPR
, outer_init
, dinit
);
3359 outer_step
= size_binop (PLUS_EXPR
,
3360 fold_convert (ssizetype
, base_iv
.step
),
3361 fold_convert (ssizetype
, offset_iv
.step
));
3363 STMT_VINFO_DR_STEP (stmt_info
) = outer_step
;
3364 /* FIXME: Use canonicalize_base_object_address (base_iv.base); */
3365 STMT_VINFO_DR_BASE_ADDRESS (stmt_info
) = base_iv
.base
;
3366 STMT_VINFO_DR_INIT (stmt_info
) = outer_init
;
3367 STMT_VINFO_DR_OFFSET (stmt_info
) =
3368 fold_convert (ssizetype
, offset_iv
.base
);
3369 STMT_VINFO_DR_ALIGNED_TO (stmt_info
) =
3370 size_int (highest_pow2_factor (offset_iv
.base
));
3372 if (dump_file
&& (dump_flags
& TDF_DETAILS
))
3374 fprintf (dump_file
, "\touter base_address: ");
3375 print_generic_expr (dump_file
, STMT_VINFO_DR_BASE_ADDRESS (stmt_info
), TDF_SLIM
);
3376 fprintf (dump_file
, "\n\touter offset from base address: ");
3377 print_generic_expr (dump_file
, STMT_VINFO_DR_OFFSET (stmt_info
), TDF_SLIM
);
3378 fprintf (dump_file
, "\n\touter constant offset from base address: ");
3379 print_generic_expr (dump_file
, STMT_VINFO_DR_INIT (stmt_info
), TDF_SLIM
);
3380 fprintf (dump_file
, "\n\touter step: ");
3381 print_generic_expr (dump_file
, STMT_VINFO_DR_STEP (stmt_info
), TDF_SLIM
);
3382 fprintf (dump_file
, "\n\touter aligned to: ");
3383 print_generic_expr (dump_file
, STMT_VINFO_DR_ALIGNED_TO (stmt_info
), TDF_SLIM
);
3387 if (STMT_VINFO_DATA_REF (stmt_info
))
3389 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3392 "not vectorized: more than one data ref in stmt: ");
3393 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
3397 STMT_VINFO_DATA_REF (stmt_info
) = dr
;
3399 /* Set vectype for STMT. */
3400 scalar_type
= TREE_TYPE (DR_REF (dr
));
3401 STMT_VINFO_VECTYPE (stmt_info
) =
3402 get_vectype_for_scalar_type (scalar_type
);
3403 if (!STMT_VINFO_VECTYPE (stmt_info
))
3405 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3408 "not vectorized: no vectype for stmt: ");
3409 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
3410 fprintf (vect_dump
, " scalar_type: ");
3411 print_generic_expr (vect_dump
, scalar_type
, TDF_DETAILS
);
3421 /* Utility functions used by vect_mark_stmts_to_be_vectorized. */
3423 /* Function vect_mark_relevant.
3425 Mark STMT as "relevant for vectorization" and add it to WORKLIST. */
3428 vect_mark_relevant (VEC(tree
,heap
) **worklist
, tree stmt
,
3429 enum vect_relevant relevant
, bool live_p
)
3431 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3432 enum vect_relevant save_relevant
= STMT_VINFO_RELEVANT (stmt_info
);
3433 bool save_live_p
= STMT_VINFO_LIVE_P (stmt_info
);
3435 if (vect_print_dump_info (REPORT_DETAILS
))
3436 fprintf (vect_dump
, "mark relevant %d, live %d.", relevant
, live_p
);
3438 if (STMT_VINFO_IN_PATTERN_P (stmt_info
))
3442 /* This is the last stmt in a sequence that was detected as a
3443 pattern that can potentially be vectorized. Don't mark the stmt
3444 as relevant/live because it's not going to be vectorized.
3445 Instead mark the pattern-stmt that replaces it. */
3447 pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
3449 if (vect_print_dump_info (REPORT_DETAILS
))
3450 fprintf (vect_dump
, "last stmt in pattern. don't mark relevant/live.");
3451 stmt_info
= vinfo_for_stmt (pattern_stmt
);
3452 gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info
) == stmt
);
3453 save_relevant
= STMT_VINFO_RELEVANT (stmt_info
);
3454 save_live_p
= STMT_VINFO_LIVE_P (stmt_info
);
3455 stmt
= pattern_stmt
;
3458 STMT_VINFO_LIVE_P (stmt_info
) |= live_p
;
3459 if (relevant
> STMT_VINFO_RELEVANT (stmt_info
))
3460 STMT_VINFO_RELEVANT (stmt_info
) = relevant
;
3462 if (STMT_VINFO_RELEVANT (stmt_info
) == save_relevant
3463 && STMT_VINFO_LIVE_P (stmt_info
) == save_live_p
)
3465 if (vect_print_dump_info (REPORT_DETAILS
))
3466 fprintf (vect_dump
, "already marked relevant/live.");
3470 VEC_safe_push (tree
, heap
, *worklist
, stmt
);
3474 /* Function vect_stmt_relevant_p.
3476 Return true if STMT in loop that is represented by LOOP_VINFO is
3477 "relevant for vectorization".
3479 A stmt is considered "relevant for vectorization" if:
3480 - it has uses outside the loop.
3481 - it has vdefs (it alters memory).
3482 - control stmts in the loop (except for the exit condition).
3484 CHECKME: what other side effects would the vectorizer allow? */
3487 vect_stmt_relevant_p (tree stmt
, loop_vec_info loop_vinfo
,
3488 enum vect_relevant
*relevant
, bool *live_p
)
3490 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3491 ssa_op_iter op_iter
;
3492 imm_use_iterator imm_iter
;
3493 use_operand_p use_p
;
3494 def_operand_p def_p
;
3496 *relevant
= vect_unused_in_loop
;
3499 /* cond stmt other than loop exit cond. */
3500 if (is_ctrl_stmt (stmt
)
3501 && STMT_VINFO_TYPE (vinfo_for_stmt (stmt
)) != loop_exit_ctrl_vec_info_type
)
3502 *relevant
= vect_used_in_loop
;
3504 /* changing memory. */
3505 if (TREE_CODE (stmt
) != PHI_NODE
)
3506 if (!ZERO_SSA_OPERANDS (stmt
, SSA_OP_VIRTUAL_DEFS
))
3508 if (vect_print_dump_info (REPORT_DETAILS
))
3509 fprintf (vect_dump
, "vec_stmt_relevant_p: stmt has vdefs.");
3510 *relevant
= vect_used_in_loop
;
3513 /* uses outside the loop. */
3514 FOR_EACH_PHI_OR_STMT_DEF (def_p
, stmt
, op_iter
, SSA_OP_DEF
)
3516 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, DEF_FROM_PTR (def_p
))
3518 basic_block bb
= bb_for_stmt (USE_STMT (use_p
));
3519 if (!flow_bb_inside_loop_p (loop
, bb
))
3521 if (vect_print_dump_info (REPORT_DETAILS
))
3522 fprintf (vect_dump
, "vec_stmt_relevant_p: used out of loop.");
3524 /* We expect all such uses to be in the loop exit phis
3525 (because of loop closed form) */
3526 gcc_assert (TREE_CODE (USE_STMT (use_p
)) == PHI_NODE
);
3527 gcc_assert (bb
== single_exit (loop
)->dest
);
3534 return (*live_p
|| *relevant
);
3539 Function process_use.
3542 - a USE in STMT in a loop represented by LOOP_VINFO
3543 - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
3544 that defined USE. This is dont by calling mark_relevant and passing it
3545 the WORKLIST (to add DEF_STMT to the WORKlist in case itis relevant).
3548 Generally, LIVE_P and RELEVANT are used to define the liveness and
3549 relevance info of the DEF_STMT of this USE:
3550 STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
3551 STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
3553 - case 1: If USE is used only for address computations (e.g. array indexing),
3554 which does not need to be directly vectorized, then the liveness/relevance
3555 of the respective DEF_STMT is left unchanged.
3556 - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
3557 skip DEF_STMT cause it had already been processed.
3558 - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
3559 be modified accordingly.
3561 Return true if everything is as expected. Return false otherwise. */
3564 process_use (tree stmt
, tree use
, loop_vec_info loop_vinfo
, bool live_p
,
3565 enum vect_relevant relevant
, VEC(tree
,heap
) **worklist
)
3567 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3568 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (stmt
);
3569 stmt_vec_info dstmt_vinfo
;
3570 basic_block bb
, def_bb
;
3572 enum vect_def_type dt
;
3574 /* case 1: we are only interested in uses that need to be vectorized. Uses
3575 that are used for address computation are not considered relevant. */
3576 if (!exist_non_indexing_operands_for_use_p (use
, stmt
))
3579 if (!vect_is_simple_use (use
, loop_vinfo
, &def_stmt
, &def
, &dt
))
3581 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
3582 fprintf (vect_dump
, "not vectorized: unsupported use in stmt.");
3586 if (!def_stmt
|| IS_EMPTY_STMT (def_stmt
))
3589 def_bb
= bb_for_stmt (def_stmt
);
3590 if (!flow_bb_inside_loop_p (loop
, def_bb
))
3592 if (vect_print_dump_info (REPORT_DETAILS
))
3593 fprintf (vect_dump
, "def_stmt is out of loop.");
3597 /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
3598 DEF_STMT must have already been processed, because this should be the
3599 only way that STMT, which is a reduction-phi, was put in the worklist,
3600 as there should be no other uses for DEF_STMT in the loop. So we just
3601 check that everything is as expected, and we are done. */
3602 dstmt_vinfo
= vinfo_for_stmt (def_stmt
);
3603 bb
= bb_for_stmt (stmt
);
3604 if (TREE_CODE (stmt
) == PHI_NODE
3605 && STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_reduction_def
3606 && TREE_CODE (def_stmt
) != PHI_NODE
3607 && STMT_VINFO_DEF_TYPE (dstmt_vinfo
) == vect_reduction_def
3608 && bb
->loop_father
== def_bb
->loop_father
)
3610 if (vect_print_dump_info (REPORT_DETAILS
))
3611 fprintf (vect_dump
, "reduc-stmt defining reduc-phi in the same nest.");
3612 if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo
))
3613 dstmt_vinfo
= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo
));
3614 gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo
) < vect_used_by_reduction
);
3615 gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo
)
3616 || STMT_VINFO_RELEVANT (dstmt_vinfo
) > vect_unused_in_loop
);
3620 /* case 3a: outer-loop stmt defining an inner-loop stmt:
3621 outer-loop-header-bb:
3627 if (flow_loop_nested_p (def_bb
->loop_father
, bb
->loop_father
))
3629 if (vect_print_dump_info (REPORT_DETAILS
))
3630 fprintf (vect_dump
, "outer-loop def-stmt defining inner-loop stmt.");
3633 case vect_unused_in_loop
:
3634 relevant
= (STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_reduction_def
) ?
3635 vect_used_by_reduction
: vect_unused_in_loop
;
3637 case vect_used_in_outer_by_reduction
:
3638 relevant
= vect_used_by_reduction
;
3640 case vect_used_in_outer
:
3641 relevant
= vect_used_in_loop
;
3643 case vect_used_by_reduction
:
3644 case vect_used_in_loop
:
3652 /* case 3b: inner-loop stmt defining an outer-loop stmt:
3653 outer-loop-header-bb:
3659 else if (flow_loop_nested_p (bb
->loop_father
, def_bb
->loop_father
))
3661 if (vect_print_dump_info (REPORT_DETAILS
))
3662 fprintf (vect_dump
, "inner-loop def-stmt defining outer-loop stmt.");
3665 case vect_unused_in_loop
:
3666 relevant
= (STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_reduction_def
) ?
3667 vect_used_in_outer_by_reduction
: vect_unused_in_loop
;
3670 case vect_used_in_outer_by_reduction
:
3671 case vect_used_in_outer
:
3674 case vect_used_by_reduction
:
3675 relevant
= vect_used_in_outer_by_reduction
;
3678 case vect_used_in_loop
:
3679 relevant
= vect_used_in_outer
;
3687 vect_mark_relevant (worklist
, def_stmt
, relevant
, live_p
);
3692 /* Function vect_mark_stmts_to_be_vectorized.
3694 Not all stmts in the loop need to be vectorized. For example:
3703 Stmt 1 and 3 do not need to be vectorized, because loop control and
3704 addressing of vectorized data-refs are handled differently.
3706 This pass detects such stmts. */
3709 vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo
)
3711 VEC(tree
,heap
) *worklist
;
3712 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3713 basic_block
*bbs
= LOOP_VINFO_BBS (loop_vinfo
);
3714 unsigned int nbbs
= loop
->num_nodes
;
3715 block_stmt_iterator si
;
3719 stmt_vec_info stmt_vinfo
;
3723 enum vect_relevant relevant
;
3725 if (vect_print_dump_info (REPORT_DETAILS
))
3726 fprintf (vect_dump
, "=== vect_mark_stmts_to_be_vectorized ===");
3728 worklist
= VEC_alloc (tree
, heap
, 64);
3730 /* 1. Init worklist. */
3731 for (i
= 0; i
< nbbs
; i
++)
3734 for (phi
= phi_nodes (bb
); phi
; phi
= PHI_CHAIN (phi
))
3736 if (vect_print_dump_info (REPORT_DETAILS
))
3738 fprintf (vect_dump
, "init: phi relevant? ");
3739 print_generic_expr (vect_dump
, phi
, TDF_SLIM
);
3742 if (vect_stmt_relevant_p (phi
, loop_vinfo
, &relevant
, &live_p
))
3743 vect_mark_relevant (&worklist
, phi
, relevant
, live_p
);
3745 for (si
= bsi_start (bb
); !bsi_end_p (si
); bsi_next (&si
))
3747 stmt
= bsi_stmt (si
);
3748 if (vect_print_dump_info (REPORT_DETAILS
))
3750 fprintf (vect_dump
, "init: stmt relevant? ");
3751 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
3754 if (vect_stmt_relevant_p (stmt
, loop_vinfo
, &relevant
, &live_p
))
3755 vect_mark_relevant (&worklist
, stmt
, relevant
, live_p
);
3759 /* 2. Process_worklist */
3760 while (VEC_length (tree
, worklist
) > 0)
3762 use_operand_p use_p
;
3765 stmt
= VEC_pop (tree
, worklist
);
3766 if (vect_print_dump_info (REPORT_DETAILS
))
3768 fprintf (vect_dump
, "worklist: examine stmt: ");
3769 print_generic_expr (vect_dump
, stmt
, TDF_SLIM
);
3772 /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
3773 (DEF_STMT) as relevant/irrelevant and live/dead according to the
3774 liveness and relevance properties of STMT. */
3775 ann
= stmt_ann (stmt
);
3776 stmt_vinfo
= vinfo_for_stmt (stmt
);
3777 relevant
= STMT_VINFO_RELEVANT (stmt_vinfo
);
3778 live_p
= STMT_VINFO_LIVE_P (stmt_vinfo
);
3780 /* Generally, the liveness and relevance properties of STMT are
3781 propagated as is to the DEF_STMTs of its USEs:
3782 live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
3783 relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)
3785 One exception is when STMT has been identified as defining a reduction
3786 variable; in this case we set the liveness/relevance as follows:
3788 relevant = vect_used_by_reduction
3789 This is because we distinguish between two kinds of relevant stmts -
3790 those that are used by a reduction computation, and those that are
3791 (also) used by a regular computation. This allows us later on to
3792 identify stmts that are used solely by a reduction, and therefore the
3793 order of the results that they produce does not have to be kept.
3795 Reduction phis are expected to be used by a reduction stmt, or by
3796 in an outer loop; Other reduction stmts are expected to be
3797 in the loop, and possibly used by a stmt in an outer loop.
3798 Here are the expected values of "relevant" for reduction phis/stmts:
3801 vect_unused_in_loop ok
3802 vect_used_in_outer_by_reduction ok ok
3803 vect_used_in_outer ok ok
3804 vect_used_by_reduction ok
3805 vect_used_in_loop */
3807 if (STMT_VINFO_DEF_TYPE (stmt_vinfo
) == vect_reduction_def
)
3809 enum vect_relevant tmp_relevant
= relevant
;
3810 switch (tmp_relevant
)
3812 case vect_unused_in_loop
:
3813 gcc_assert (TREE_CODE (stmt
) != PHI_NODE
);
3814 relevant
= vect_used_by_reduction
;
3817 case vect_used_in_outer_by_reduction
:
3818 case vect_used_in_outer
:
3819 gcc_assert (TREE_CODE (stmt
) != WIDEN_SUM_EXPR
3820 && TREE_CODE (stmt
) != DOT_PROD_EXPR
);
3823 case vect_used_by_reduction
:
3824 if (TREE_CODE (stmt
) == PHI_NODE
)
3827 case vect_used_in_loop
:
3829 if (vect_print_dump_info (REPORT_DETAILS
))
3830 fprintf (vect_dump
, "unsupported use of reduction.");
3831 VEC_free (tree
, heap
, worklist
);
3837 FOR_EACH_PHI_OR_STMT_USE (use_p
, stmt
, iter
, SSA_OP_USE
)
3839 tree op
= USE_FROM_PTR (use_p
);
3840 if (!process_use (stmt
, op
, loop_vinfo
, live_p
, relevant
, &worklist
))
3842 VEC_free (tree
, heap
, worklist
);
3846 } /* while worklist */
3848 VEC_free (tree
, heap
, worklist
);
3853 /* Function vect_can_advance_ivs_p
3855 In case the number of iterations that LOOP iterates is unknown at compile
3856 time, an epilog loop will be generated, and the loop induction variables
3857 (IVs) will be "advanced" to the value they are supposed to take just before
3858 the epilog loop. Here we check that the access function of the loop IVs
3859 and the expression that represents the loop bound are simple enough.
3860 These restrictions will be relaxed in the future. */
3863 vect_can_advance_ivs_p (loop_vec_info loop_vinfo
)
3865 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3866 basic_block bb
= loop
->header
;
3869 /* Analyze phi functions of the loop header. */
3871 if (vect_print_dump_info (REPORT_DETAILS
))
3872 fprintf (vect_dump
, "vect_can_advance_ivs_p:");
3874 for (phi
= phi_nodes (bb
); phi
; phi
= PHI_CHAIN (phi
))
3876 tree access_fn
= NULL
;
3877 tree evolution_part
;
3879 if (vect_print_dump_info (REPORT_DETAILS
))
3881 fprintf (vect_dump
, "Analyze phi: ");
3882 print_generic_expr (vect_dump
, phi
, TDF_SLIM
);
3885 /* Skip virtual phi's. The data dependences that are associated with
3886 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
3888 if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi
))))
3890 if (vect_print_dump_info (REPORT_DETAILS
))
3891 fprintf (vect_dump
, "virtual phi. skip.");
3895 /* Skip reduction phis. */
3897 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi
)) == vect_reduction_def
)
3899 if (vect_print_dump_info (REPORT_DETAILS
))
3900 fprintf (vect_dump
, "reduc phi. skip.");
3904 /* Analyze the evolution function. */
3906 access_fn
= instantiate_parameters
3907 (loop
, analyze_scalar_evolution (loop
, PHI_RESULT (phi
)));
3911 if (vect_print_dump_info (REPORT_DETAILS
))
3912 fprintf (vect_dump
, "No Access function.");
3916 if (vect_print_dump_info (REPORT_DETAILS
))
3918 fprintf (vect_dump
, "Access function of PHI: ");
3919 print_generic_expr (vect_dump
, access_fn
, TDF_SLIM
);
3922 evolution_part
= evolution_part_in_loop_num (access_fn
, loop
->num
);
3924 if (evolution_part
== NULL_TREE
)
3926 if (vect_print_dump_info (REPORT_DETAILS
))
3927 fprintf (vect_dump
, "No evolution.");
3931 /* FORNOW: We do not transform initial conditions of IVs
3932 which evolution functions are a polynomial of degree >= 2. */
3934 if (tree_is_chrec (evolution_part
))
3942 /* Function vect_get_loop_niters.
3944 Determine how many iterations the loop is executed.
3945 If an expression that represents the number of iterations
3946 can be constructed, place it in NUMBER_OF_ITERATIONS.
3947 Return the loop exit condition. */
3950 vect_get_loop_niters (struct loop
*loop
, tree
*number_of_iterations
)
3954 if (vect_print_dump_info (REPORT_DETAILS
))
3955 fprintf (vect_dump
, "=== get_loop_niters ===");
3957 niters
= number_of_exit_cond_executions (loop
);
3959 if (niters
!= NULL_TREE
3960 && niters
!= chrec_dont_know
)
3962 *number_of_iterations
= niters
;
3964 if (vect_print_dump_info (REPORT_DETAILS
))
3966 fprintf (vect_dump
, "==> get_loop_niters:" );
3967 print_generic_expr (vect_dump
, *number_of_iterations
, TDF_SLIM
);
3971 return get_loop_exit_condition (loop
);
3975 /* Function vect_analyze_loop_1.
3977 Apply a set of analyses on LOOP, and create a loop_vec_info struct
3978 for it. The different analyses will record information in the
3979 loop_vec_info struct. This is a subset of the analyses applied in
3980 vect_analyze_loop, to be applied on an inner-loop nested in the loop
3981 that is now considered for (outer-loop) vectorization. */
3983 static loop_vec_info
3984 vect_analyze_loop_1 (struct loop
*loop
)
3986 loop_vec_info loop_vinfo
;
3988 if (vect_print_dump_info (REPORT_DETAILS
))
3989 fprintf (vect_dump
, "===== analyze_loop_nest_1 =====");
3991 /* Check the CFG characteristics of the loop (nesting, entry/exit, etc. */
3993 loop_vinfo
= vect_analyze_loop_form (loop
);
3996 if (vect_print_dump_info (REPORT_DETAILS
))
3997 fprintf (vect_dump
, "bad inner-loop form.");
4005 /* Function vect_analyze_loop_form.
4007 Verify that certain CFG restrictions hold, including:
4008 - the loop has a pre-header
4009 - the loop has a single entry and exit
4010 - the loop exit condition is simple enough, and the number of iterations
4011 can be analyzed (a countable loop). */
4014 vect_analyze_loop_form (struct loop
*loop
)
4016 loop_vec_info loop_vinfo
;
4018 tree number_of_iterations
= NULL
;
4019 loop_vec_info inner_loop_vinfo
= NULL
;
4021 if (vect_print_dump_info (REPORT_DETAILS
))
4022 fprintf (vect_dump
, "=== vect_analyze_loop_form ===");
4024 /* Different restrictions apply when we are considering an inner-most loop,
4025 vs. an outer (nested) loop.
4026 (FORNOW. May want to relax some of these restrictions in the future). */
4030 /* Inner-most loop. We currently require that the number of BBs is
4031 exactly 2 (the header and latch). Vectorizable inner-most loops
4042 if (loop
->num_nodes
!= 2)
4044 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4045 fprintf (vect_dump
, "not vectorized: too many BBs in loop.");
4049 if (empty_block_p (loop
->header
))
4051 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4052 fprintf (vect_dump
, "not vectorized: empty loop.");
4058 struct loop
*innerloop
= loop
->inner
;
4059 edge backedge
, entryedge
;
4061 /* Nested loop. We currently require that the loop is doubly-nested,
4062 contains a single inner loop, and the number of BBs is exactly 5.
4063 Vectorizable outer-loops look like this:
4075 The inner-loop has the properties expected of inner-most loops
4076 as described above. */
4078 if ((loop
->inner
)->inner
|| (loop
->inner
)->next
)
4080 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4081 fprintf (vect_dump
, "not vectorized: multiple nested loops.");
4085 /* Analyze the inner-loop. */
4086 inner_loop_vinfo
= vect_analyze_loop_1 (loop
->inner
);
4087 if (!inner_loop_vinfo
)
4089 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4090 fprintf (vect_dump
, "not vectorized: Bad inner loop.");
4094 if (!expr_invariant_in_loop_p (loop
,
4095 LOOP_VINFO_NITERS (inner_loop_vinfo
)))
4097 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4099 "not vectorized: inner-loop count not invariant.");
4100 destroy_loop_vec_info (inner_loop_vinfo
, true);
4104 if (loop
->num_nodes
!= 5)
4106 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4107 fprintf (vect_dump
, "not vectorized: too many BBs in loop.");
4108 destroy_loop_vec_info (inner_loop_vinfo
, true);
4112 gcc_assert (EDGE_COUNT (innerloop
->header
->preds
) == 2);
4113 backedge
= EDGE_PRED (innerloop
->header
, 1);
4114 entryedge
= EDGE_PRED (innerloop
->header
, 0);
4115 if (EDGE_PRED (innerloop
->header
, 0)->src
== innerloop
->latch
)
4117 backedge
= EDGE_PRED (innerloop
->header
, 0);
4118 entryedge
= EDGE_PRED (innerloop
->header
, 1);
4121 if (entryedge
->src
!= loop
->header
4122 || !single_exit (innerloop
)
4123 || single_exit (innerloop
)->dest
!= EDGE_PRED (loop
->latch
, 0)->src
)
4125 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4126 fprintf (vect_dump
, "not vectorized: unsupported outerloop form.");
4127 destroy_loop_vec_info (inner_loop_vinfo
, true);
4131 if (vect_print_dump_info (REPORT_DETAILS
))
4132 fprintf (vect_dump
, "Considering outer-loop vectorization.");
4135 if (!single_exit (loop
)
4136 || EDGE_COUNT (loop
->header
->preds
) != 2)
4138 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4140 if (!single_exit (loop
))
4141 fprintf (vect_dump
, "not vectorized: multiple exits.");
4142 else if (EDGE_COUNT (loop
->header
->preds
) != 2)
4143 fprintf (vect_dump
, "not vectorized: too many incoming edges.");
4145 if (inner_loop_vinfo
)
4146 destroy_loop_vec_info (inner_loop_vinfo
, true);
4150 /* We assume that the loop exit condition is at the end of the loop. i.e,
4151 that the loop is represented as a do-while (with a proper if-guard
4152 before the loop if needed), where the loop header contains all the
4153 executable statements, and the latch is empty. */
4154 if (!empty_block_p (loop
->latch
)
4155 || phi_nodes (loop
->latch
))
4157 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4158 fprintf (vect_dump
, "not vectorized: unexpected loop form.");
4159 if (inner_loop_vinfo
)
4160 destroy_loop_vec_info (inner_loop_vinfo
, true);
4164 /* Make sure there exists a single-predecessor exit bb: */
4165 if (!single_pred_p (single_exit (loop
)->dest
))
4167 edge e
= single_exit (loop
);
4168 if (!(e
->flags
& EDGE_ABNORMAL
))
4170 split_loop_exit_edge (e
);
4171 if (vect_print_dump_info (REPORT_DETAILS
))
4172 fprintf (vect_dump
, "split exit edge.");
4176 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4177 fprintf (vect_dump
, "not vectorized: abnormal loop exit edge.");
4178 if (inner_loop_vinfo
)
4179 destroy_loop_vec_info (inner_loop_vinfo
, true);
4184 loop_cond
= vect_get_loop_niters (loop
, &number_of_iterations
);
4187 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4188 fprintf (vect_dump
, "not vectorized: complicated exit condition.");
4189 if (inner_loop_vinfo
)
4190 destroy_loop_vec_info (inner_loop_vinfo
, true);
4194 if (!number_of_iterations
)
4196 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4198 "not vectorized: number of iterations cannot be computed.");
4199 if (inner_loop_vinfo
)
4200 destroy_loop_vec_info (inner_loop_vinfo
, true);
4204 if (chrec_contains_undetermined (number_of_iterations
))
4206 if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS
))
4207 fprintf (vect_dump
, "Infinite number of iterations.");
4208 if (inner_loop_vinfo
)
4209 destroy_loop_vec_info (inner_loop_vinfo
, true);
4213 if (!NITERS_KNOWN_P (number_of_iterations
))
4215 if (vect_print_dump_info (REPORT_DETAILS
))
4217 fprintf (vect_dump
, "Symbolic number of iterations is ");
4218 print_generic_expr (vect_dump
, number_of_iterations
, TDF_DETAILS
);
4221 else if (TREE_INT_CST_LOW (number_of_iterations
) == 0)
4223 if (vect_print_dump_info (REPORT_UNVECTORIZED_LOOPS
))
4224 fprintf (vect_dump
, "not vectorized: number of iterations = 0.");
4225 if (inner_loop_vinfo
)
4226 destroy_loop_vec_info (inner_loop_vinfo
, false);
4230 loop_vinfo
= new_loop_vec_info (loop
);
4231 LOOP_VINFO_NITERS (loop_vinfo
) = number_of_iterations
;
4232 LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo
) = number_of_iterations
;
4234 STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond
)) = loop_exit_ctrl_vec_info_type
;
4236 /* CHECKME: May want to keep it around it in the future. */
4237 if (inner_loop_vinfo
)
4238 destroy_loop_vec_info (inner_loop_vinfo
, false);
4240 gcc_assert (!loop
->aux
);
4241 loop
->aux
= loop_vinfo
;
4246 /* Function vect_analyze_loop.
4248 Apply a set of analyses on LOOP, and create a loop_vec_info struct
4249 for it. The different analyses will record information in the
4250 loop_vec_info struct. */
4252 vect_analyze_loop (struct loop
*loop
)
4255 loop_vec_info loop_vinfo
;
4257 if (vect_print_dump_info (REPORT_DETAILS
))
4258 fprintf (vect_dump
, "===== analyze_loop_nest =====");
4260 if (loop_outer (loop
)
4261 && loop_vec_info_for_loop (loop_outer (loop
))
4262 && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop
))))
4264 if (vect_print_dump_info (REPORT_DETAILS
))
4265 fprintf (vect_dump
, "outer-loop already vectorized.");
4269 /* Check the CFG characteristics of the loop (nesting, entry/exit, etc. */
4271 loop_vinfo
= vect_analyze_loop_form (loop
);
4274 if (vect_print_dump_info (REPORT_DETAILS
))
4275 fprintf (vect_dump
, "bad loop form.");
4279 /* Find all data references in the loop (which correspond to vdefs/vuses)
4280 and analyze their evolution in the loop.
4282 FORNOW: Handle only simple, array references, which
4283 alignment can be forced, and aligned pointer-references. */
4285 ok
= vect_analyze_data_refs (loop_vinfo
);
4288 if (vect_print_dump_info (REPORT_DETAILS
))
4289 fprintf (vect_dump
, "bad data references.");
4290 destroy_loop_vec_info (loop_vinfo
, true);
4294 /* Classify all cross-iteration scalar data-flow cycles.
4295 Cross-iteration cycles caused by virtual phis are analyzed separately. */
4297 vect_analyze_scalar_cycles (loop_vinfo
);
4299 vect_pattern_recog (loop_vinfo
);
4301 /* Data-flow analysis to detect stmts that do not need to be vectorized. */
4303 ok
= vect_mark_stmts_to_be_vectorized (loop_vinfo
);
4306 if (vect_print_dump_info (REPORT_DETAILS
))
4307 fprintf (vect_dump
, "unexpected pattern.");
4308 destroy_loop_vec_info (loop_vinfo
, true);
4312 /* Analyze the alignment of the data-refs in the loop.
4313 Fail if a data reference is found that cannot be vectorized. */
4315 ok
= vect_analyze_data_refs_alignment (loop_vinfo
);
4318 if (vect_print_dump_info (REPORT_DETAILS
))
4319 fprintf (vect_dump
, "bad data alignment.");
4320 destroy_loop_vec_info (loop_vinfo
, true);
4324 ok
= vect_determine_vectorization_factor (loop_vinfo
);
4327 if (vect_print_dump_info (REPORT_DETAILS
))
4328 fprintf (vect_dump
, "can't determine vectorization factor.");
4329 destroy_loop_vec_info (loop_vinfo
, true);
4333 /* Analyze data dependences between the data-refs in the loop.
4334 FORNOW: fail at the first data dependence that we encounter. */
4336 ok
= vect_analyze_data_ref_dependences (loop_vinfo
);
4339 if (vect_print_dump_info (REPORT_DETAILS
))
4340 fprintf (vect_dump
, "bad data dependence.");
4341 destroy_loop_vec_info (loop_vinfo
, true);
4345 /* Analyze the access patterns of the data-refs in the loop (consecutive,
4346 complex, etc.). FORNOW: Only handle consecutive access pattern. */
4348 ok
= vect_analyze_data_ref_accesses (loop_vinfo
);
4351 if (vect_print_dump_info (REPORT_DETAILS
))
4352 fprintf (vect_dump
, "bad data access.");
4353 destroy_loop_vec_info (loop_vinfo
, true);
4357 /* Prune the list of ddrs to be tested at run-time by versioning for alias.
4358 It is important to call pruning after vect_analyze_data_ref_accesses,
4359 since we use grouping information gathered by interleaving analysis. */
4360 ok
= vect_prune_runtime_alias_test_list (loop_vinfo
);
4363 if (vect_print_dump_info (REPORT_DETAILS
))
4364 fprintf (vect_dump
, "too long list of versioning for alias "
4366 destroy_loop_vec_info (loop_vinfo
, true);
4370 /* Check the SLP opportunities in the loop, analyze and build SLP trees. */
4371 ok
= vect_analyze_slp (loop_vinfo
);
4374 /* Decide which possible SLP instances to SLP. */
4375 vect_make_slp_decision (loop_vinfo
);
4377 /* Find stmts that need to be both vectorized and SLPed. */
4378 vect_detect_hybrid_slp (loop_vinfo
);
4381 /* This pass will decide on using loop versioning and/or loop peeling in
4382 order to enhance the alignment of data references in the loop. */
4384 ok
= vect_enhance_data_refs_alignment (loop_vinfo
);
4387 if (vect_print_dump_info (REPORT_DETAILS
))
4388 fprintf (vect_dump
, "bad data alignment.");
4389 destroy_loop_vec_info (loop_vinfo
, true);
4393 /* Scan all the operations in the loop and make sure they are
4396 ok
= vect_analyze_operations (loop_vinfo
);
4399 if (vect_print_dump_info (REPORT_DETAILS
))
4400 fprintf (vect_dump
, "bad operation or unsupported loop bound.");
4401 destroy_loop_vec_info (loop_vinfo
, true);
4405 LOOP_VINFO_VECTORIZABLE_P (loop_vinfo
) = 1;