/* Vectorizer
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

typedef struct _stmt_vec_info *stmt_vec_info;

#include "tree-data-ref.h"
#include "tree-hash-traits.h"
/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var,
  vect_mask_var
};

/* Defines type of operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};
/* Define type of available alignment support.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};
/* Define type of def-use cross-iteration cycle.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};
/* Define type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION,

  /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     to implement:

       for (int i = 0; i < VF; ++i)
         res = cond[i] ? val[i] : res;  */
  EXTRACT_LAST_REDUCTION,

  /* Use a folding reduction within the loop to implement:

       for (int i = 0; i < VF; ++i)
         res = res OP val[i];

     (with no reassociation).  */
  FOLD_LEFT_REDUCTION
};
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))
/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  int count;
  enum vect_cost_for_stmt kind;
  enum vect_cost_model_location where;
  stmt_vec_info stmt_info;
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
/* Maps base addresses to an innermost_loop_behavior that gives the maximum
   known alignment for that base.  */
typedef hash_map<tree_operand_hash,
                 innermost_loop_behavior *> vec_base_alignments;
/************************************************************************
  SLP
 ************************************************************************/
typedef struct _slp_tree *slp_tree;
/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain def-stmts of this node's statement operands.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<stmt_vec_info> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<stmt_vec_info> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts.  It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by the vector size.  */
  unsigned int vec_stmts_size;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};
/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of the SLP tree.  */
  slp_tree root;

  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorize this SLP instance.  */
  poly_uint64 unrolling_factor;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;

  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;
} *slp_instance;
/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)                (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)                     (S)->def_type
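
/* Illustrative sketch, not part of the interface: a recursive walk over
   an SLP tree using the accessors above might look like

     static void
     walk_slp (slp_tree node)
     {
       unsigned i;
       stmt_vec_info stmt_info;
       slp_tree child;
       FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt_info)
         process (stmt_info);
       FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
         walk_slp (child);
     }

   where walk_slp and process are hypothetical helpers, not entities
   defined in this header.  */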
/* Describes two objects whose addresses must be unequal for the vectorized
   loop to be valid.  */
typedef std::pair<tree, tree> vec_object_pair;
/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
   UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
struct vec_lower_bound {
  vec_lower_bound () {}
  vec_lower_bound (tree e, bool u, poly_uint64 m)
    : expr (e), unsigned_p (u), min_value (m) {}

  tree expr;
  bool unsigned_p;
  poly_uint64 min_value;
};
/* Vectorizer state shared between different analyses, like vector sizes
   of the same CFG region.  */
struct vec_info_shared {
  vec_info_shared ();
  ~vec_info_shared ();

  void save_datarefs ();
  void check_datarefs ();

  /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
  vec<data_reference_p> datarefs;
  vec<data_reference> datarefs_copy;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;
};
/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum vec_kind { bb, loop };

  vec_info (vec_kind, void *, vec_info_shared *);
  ~vec_info ();

  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  struct dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);

  /* The type of vectorization.  */
  vec_kind kind;

  /* Shared vectorizer state.  */
  vec_info_shared *shared;

  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;

  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;

private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};
struct _loop_vec_info;
struct _bb_vec_info;

template <>
template <>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

template <>
template <>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}
/* In general, we can divide the vector statements in a vectorized loop
   into related groups ("rgroups") and say that for each rgroup there is
   some nS such that the rgroup operates on nS values from one scalar
   iteration followed by nS values from the next.  That is, if VF is the
   vectorization factor of the loop, the rgroup operates on a sequence:

     (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)

   where (i,j) represents a scalar value with index j in a scalar
   iteration with index i.

   [ We use the term "rgroup" to emphasise that this grouping isn't
     necessarily the same as the grouping of statements used elsewhere.
     For example, if we implement a group of scalar loads using gather
     loads, we'll use a separate gather load for each scalar load, and
     thus each gather load will belong to its own rgroup. ]

   In general this sequence will occupy nV vectors concatenated
   together.  If these vectors have nL lanes each, the total number
   of scalar values N is given by:

       N = nS * VF = nV * nL

   None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
   are compile-time constants but VF and nL can be variable (if the target
   supports variable-length vectors).

   In classical vectorization, each iteration of the vector loop would
   handle exactly VF iterations of the original scalar loop.  However,
   in a fully-masked loop, a particular iteration of the vector loop
   might handle fewer than VF iterations of the scalar loop.  The vector
   lanes that correspond to iterations of the scalar loop are said to be
   "active" and the other lanes are said to be "inactive".

   In a fully-masked loop, many rgroups need to be masked to ensure that
   they have no effect for the inactive lanes.  Each such rgroup needs a
   sequence of booleans in the same order as above, but with each (i,j)
   replaced by a boolean that indicates whether iteration i is active.
   This sequence occupies nV vector masks that again have nL lanes each.
   Thus the mask sequence as a whole consists of VF independent booleans
   that are each repeated nS times.

   We make the simplifying assumption that if a sequence of nV masks is
   suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
   VIEW_CONVERTing it.  This holds for all current targets that support
   fully-masked loops.  For example, suppose the scalar loop is:

     float *f;
     double *d;
     for (int i = 0; i < n; ++i)
       {
         f[i * 2 + 0] += 1.0f;
         f[i * 2 + 1] += 2.0f;
         d[i] += 3.0;
       }

   and suppose that vectors have 256 bits.  The vectorized f accesses
   will belong to one rgroup and the vectorized d access to another:

     f rgroup: nS = 2, nV = 1, nL = 8
     d rgroup: nS = 1, nV = 1, nL = 4

   [ In this simple example the rgroups do correspond to the normal
     SLP grouping scheme. ]

   If only the first three lanes are active, the masks we need are:

     f rgroup: 1 1 | 1 1 | 1 1 | 0 0
     d rgroup:  1  |  1  |  1  |  0

   Here we can use a mask calculated for f's rgroup for d's, but not
   vice versa.

   Thus for each value of nV, it is enough to provide nV masks, with the
   mask being calculated based on the highest nL (or, equivalently, based
   on the highest nS) required by any rgroup with that nV.  We therefore
   represent the entire collection of masks as a two-level table, with the
   first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
   the second being indexed by the mask index 0 <= i < nV.  */
/* The masks needed by rgroups with nV vectors, according to the
   description above.  */
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;

  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;

  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};

typedef auto_vec<rgroup_masks> vec_loop_masks;
/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {
  _loop_vec_info (struct loop *, vec_info_shared *);
  ~_loop_vec_info ();

  /* The loop to which this info struct refers.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;
  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;

  /* Threshold of number of iterations below which vectorization will not be
     performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;

  /* Unrolling factor.  */
  poly_uint64 vectorization_factor;
  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;

  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;

  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;

  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;

  /* Unknown DRs according to which loop was peeled.  */
  struct dr_vec_info *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
       If X=0: Peeling for alignment will not be applied.
       If X>0: Peel first X iterations.
       If X=-1: Generate a runtime test to calculate the number of iterations
                to be peeled, using the dataref recorded in the field
                unaligned_dr.  */
  int peeling_for_alignment;
  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Check that the addresses of each pair of objects is unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;

  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;

  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;

  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  auto_vec<stmt_vec_info> may_misalign_stmts;

  /* Reduction cycles detected in the loop.  Used in loop-aware SLP.  */
  auto_vec<stmt_vec_info> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;

  /* Map of IV base/step expressions to inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;
  /* The unrolling factor needed to SLP the loop.  In case pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;

  /* Is the loop vectorizable?  */
  bool vectorizable;

  /* Records whether we still have the option of using a fully-masked loop.  */
  bool can_fully_mask_p;

  /* True if we have decided to use a fully-masked loop.  */
  bool fully_masked_p;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:

       #pragma omp simd
       for (int i = 0; i < m; i++)
         a[i] = a[i + k] * c;

     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;

  /* Mark loops having masked stores.  */
  bool has_mask_store;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* For loops being epilogues of already vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;
} *loop_vec_info;
/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling, retain the total unchanged scalar loop iterations for
   the cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L)     (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L)       (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L)      (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L)                (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L)     (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L)    (L)->mask_compare_type
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->shared->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->shared->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->shared->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)          (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
  ((L)->comp_alias_ddrs.length () > 0			\
   || (L)->check_unequal_addrs.length () > 0		\
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING(L)			\
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L)		\
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
   value signifies success, and a NULL value signifies failure, supporting
   propagating an opt_problem * describing the failure back up the call
   stack.  */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;

static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}
typedef struct _bb_vec_info : public vec_info
{
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
  ~_bb_vec_info ();

  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->shared->datarefs
#define BB_VINFO_DDRS(B)             (B)->shared->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data

static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}
/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};
/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction.  These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example).  We use this
     to identify computations whose order of evaluation we can change.  */
  vect_used_by_reduction,

  vect_used_in_scope
};
/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of SLP instance and also must be loop-based vectorized, since it has
   uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.  By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, because we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., the number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};
struct dr_vec_info {
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  int target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  tree base_decl;
};

typedef struct data_reference *dr_p;
struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom).  */
  bool in_pattern_p;

  /* True if the statement was created during pattern recognition as
     part of the replacement for RELATED_STMT.  This implies that the
     statement isn't part of any basic block, although for convenience
     its gimple_bb is the same as for RELATED_STMT.  */
  bool pattern_stmt_p;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* The stmt to which this info struct refers.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  stmt_vec_info vectorized_stmt;
  /* The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access).  A GIMPLE stmt is expected to have
     at most one such data-ref.  */
  dr_vec_info dr_aux;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  innermost_loop_behavior dr_wrt_vec_loop;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  stmt_vec_info related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
     The sequence is attached to the original statement rather than the
     pattern statement.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  vec<dr_p> same_align_refs;
  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  stmt_vec_info first_element;
  /* Pointer to the next element in the group.  */
  stmt_vec_info next_element;
  /* For data-refs, in case that two or more stmts share data-ref, this is the
     pointer to the previously detected stmt with the same dr.  */
  stmt_vec_info same_dr_stmt;
  /* The size of the group.  */
  unsigned int size;
  /* For stores, number of stores from this group seen.  We vectorize the last
     one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load.  For consecutive loads,
     GAP is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized, e.g. the increment
     of the loop induction variable and computation of array indexes.
     RELEVANT indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;
  /* For loads if this is a gather, for stores if this is a scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  bool simd_lane_access_p;

  /* Classifies how the load or store is going to be implemented
     for loop vectorization.  */
  vect_memory_access_type memory_access_type;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* For CONST_COND_REDUCTION, record the reduc code.  */
  enum tree_code const_cond_reduc_code;

  /* On a reduction PHI the reduction type as detected by
     vect_force_simple_reduction.  */
  enum vect_reduction_type reduc_type;

  /* On a reduction PHI the def returned by vect_force_simple_reduction.
     On the def returned by vect_force_simple_reduction the
     corresponding PHI.  */
  stmt_vec_info reduc_def;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;

  /* If nonzero, the lhs of the statement could be truncated to this
     many bits without affecting any users of the result.  */
  unsigned int min_output_precision;

  /* If nonzero, all non-boolean input operands have the same precision,
     and they could each be truncated to this many bits without changing
     the result.  */
  unsigned int min_input_precision;

  /* If OPERATION_BITS is nonzero, the statement could be performed on
     an integer with the sign and number of bits given by OPERATION_SIGN
     and OPERATION_BITS without changing the result.  */
  unsigned int operation_precision;
  signop operation_sign;
};
/* Information about a gather/scatter call.  */
struct gather_scatter_info {
  /* The internal function to use for the gather/scatter operation,
     or IFN_LAST if a built-in function should be used instead.  */
  internal_fn ifn;

  /* The FUNCTION_DECL for the built-in gather/scatter function,
     or null if an internal function should be used instead.  */
  tree decl;

  /* The loop-invariant base value.  */
  tree base;

  /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
  tree offset;

  /* Each offset element should be multiplied by this amount before
     being added to the base.  */
  int scale;

  /* The definition type for the vectorized offset.  */
  enum vect_def_type offset_dt;

  /* The type of the vectorized offset.  */
  tree offset_vectype;

  /* The type of the scalar elements after loading or before storing.  */
  tree element_type;

  /* The type of the scalar elements being loaded or stored.  */
  tree memory_type;
};
/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt

inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}

inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
#define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code

#define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.step_alignment

#define STMT_VINFO_DR_INFO(S) \
  (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
  ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)         (S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)         (S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S)           (S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S)            (S)->reduc_def
#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
#define DR_GROUP_SAME_DR_STMT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->same_dr_stmt)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

#define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type
#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS 3

#define MAX_VECTORIZATION_FACTOR INT_MAX

/* Nonzero if TYPE represents a (scalar) boolean type or type
   in the middle-end compatible with it (unsigned precision 1 integral
   types).  Used to determine which types should be vectorized as
   VECTOR_BOOLEAN_TYPE_P.  */

#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == BOOLEAN_TYPE		\
   || ((TREE_CODE (TYPE) == INTEGER_TYPE	\
	|| TREE_CODE (TYPE) == ENUMERAL_TYPE)	\
       && TYPE_PRECISION (TYPE) == 1		\
       && TYPE_UNSIGNED (TYPE)))
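
/* For example, BOOLEAN_TYPE itself satisfies this predicate, as does the
   unsigned precision-1 INTEGER_TYPE used for a 1-bit unsigned bitfield,
   whereas a plain 32-bit "int" (precision 32) does not.  */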
static inline bool
nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
{
  return (loop->inner
	  && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
}
/* Return TRUE if a statement represented by STMT_INFO is a part of a
   pattern.  */

static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  return stmt_info->pattern_stmt_p;
}

/* If STMT_INFO is a pattern statement, return the statement that it
   replaces, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_orig_stmt (stmt_vec_info stmt_info)
{
  if (is_pattern_stmt_p (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}
/* Return the later statement between STMT1_INFO and STMT2_INFO.  */

static inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
  if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
      > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
    return stmt1_info;
  else
    return stmt2_info;
}
/* If STMT_INFO has been replaced by a pattern statement, return the
   replacement statement, otherwise return STMT_INFO itself.  */

inline stmt_vec_info
vect_stmt_to_vectorize (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}
/* Return true if BB is a loop header.  */

static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}
/* Return pow2 (X).  */

static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}
/* Alias targetm.vectorize.builtin_vectorization_cost.  */
static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
			    tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
						       vectype, misalign);
}

/* Get cost by calling cost target builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}
/* Alias targetm.vectorize.init_cost.  */

static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}

extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
			    stmt_vec_info, int, unsigned,
			    enum vect_cost_model_location);
/* Alias targetm.vectorize.add_stmt_cost.  */

static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
	       stmt_vec_info stmt_info, int misalign,
	       enum vect_cost_model_location where)
{
  unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
						   stmt_info, misalign, where);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
		    cost, where);
  return cost;
}
1203 finish_cost (void *data
, unsigned *prologue_cost
,
1204 unsigned *body_cost
, unsigned *epilogue_cost
)
1206 targetm
.vectorize
.finish_cost (data
, prologue_cost
, body_cost
, epilogue_cost
);
1209 /* Alias targetm.vectorize.destroy_cost_data. */
1212 destroy_cost_data (void *data
)
1214 targetm
.vectorize
.destroy_cost_data (data
);
static inline void
add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
{
  stmt_info_for_cost *cost;
  unsigned i;
  FOR_EACH_VEC_ELT (*cost_vec, i, cost)
    add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
		   cost->misalign, cost->where);
}
/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)

static inline void
set_dr_misalignment (dr_vec_info *dr_info, int val)
{
  dr_info->misalignment = val;
}

static inline int
dr_misalignment (dr_vec_info *dr_info)
{
  int misalign = dr_info->misalignment;
  gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
  return misalign;
}

/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)

/* Only defined once DR_MISALIGNMENT is defined.  */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)
/* Return true if data access DR_INFO is aligned to its target alignment
   (which may be less than a full vector).  */

static inline bool
aligned_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) == 0);
}

/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise.  */

static inline bool
known_alignment_for_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN);
}
/* Return the minimum alignment in bytes that the vectorized version
   of DR_INFO is guaranteed to have.  */

static inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info)
{
  if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN)
    return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
  if (DR_MISALIGNMENT (dr_info) == 0)
    return DR_TARGET_ALIGNMENT (dr_info);
  return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info);
}
/* Return the behavior of DR_INFO with respect to the vectorization context
   (which for outer loop vectorization might not be the behavior recorded
   in DR_INFO itself).  */

static inline innermost_loop_behavior *
vect_dr_behavior (dr_vec_info *dr_info)
{
  stmt_vec_info stmt_info = dr_info->stmt;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (loop_vinfo == NULL
      || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
    return &DR_INNERMOST (dr_info->dr);
  else
    return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
}
/* Return true if the vect cost model is unlimited.  */
static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}
/* Return true if the loop described by LOOP_VINFO is fully-masked and
   if the first iteration should use a partial mask in order to achieve
   alignment.  */

static inline bool
vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
{
  return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
	  && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
}
/* Return the number of vectors of type VECTYPE that are needed to get
   NUNITS elements.  NUNITS should be based on the vectorization factor,
   so it is always a known multiple of the number of elements in VECTYPE.  */

static inline unsigned int
vect_get_num_vectors (poly_uint64 nunits, tree vectype)
{
  return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
}
/* Return the number of copies needed for loop vectorization when
   a statement operates on vectors of type VECTYPE.  This is the
   vectorization factor divided by the number of elements in
   VECTYPE and is always known at compile time.  */

static inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
  return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
}
/* Update maximum unit count *MAX_NUNITS so that it accounts for
   the number of units in vector type VECTYPE.  *MAX_NUNITS can be 1
   if we haven't yet recorded any vector types.  */

static inline void
vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
{
  /* All unit counts have the form current_vector_size * X for some
     rational X, so two unit sizes must have a common multiple.
     Everything is a multiple of the initial value of 1.  */
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  *max_nunits = force_common_multiple (*max_nunits, nunits);
}
/* Return the vectorization factor that should be used for costing
   purposes while vectorizing the loop described by LOOP_VINFO.
   Pick a reasonable estimate if the vectorization factor isn't
   known at compile time.  */

static inline unsigned int
vect_vf_for_cost (loop_vec_info loop_vinfo)
{
  return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
}
/* Estimate the number of elements in VEC_TYPE for costing purposes.
   Pick a reasonable estimate if the exact number isn't known at
   compile time.  */

static inline unsigned int
vect_nunits_for_cost (tree vec_type)
{
  return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
}
/* Return the maximum possible vectorization factor for LOOP_VINFO.  */

static inline unsigned HOST_WIDE_INT
vect_max_vf (loop_vec_info loop_vinfo)
{
  unsigned HOST_WIDE_INT vf;
  if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    return vf;
  return MAX_VECTORIZATION_FACTOR;
}
/* Return the size of the value accessed by unvectorized data reference
   DR_INFO.  This is only valid once STMT_VINFO_VECTYPE has been calculated
   for the associated gimple statement, since that guarantees that DR_INFO
   accesses either a scalar or a scalar equivalent.  ("Scalar equivalent"
   here includes things like V1SI, which can be vectorized in the same way
   as a plain SI.)  */

inline unsigned int
vect_get_scalar_dr_size (dr_vec_info *dr_info)
{
  return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
}
/* Source location + hotness information.  */
extern dump_user_location_t vect_location;

/* A macro for calling:
     dump_begin_scope (MSG, vect_location);
   via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
   and then calling
     dump_end_scope ();
   once the object goes out of scope, thus capturing the nesting of
   the scopes.

   These scopes affect dump messages within them: dump messages at the
   top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
   in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.  */

#define DUMP_VECT_SCOPE(MSG) \
  AUTO_DUMP_SCOPE (MSG, vect_location)
/*-----------------------------------------------------------------*/
/* Function prototypes.                                            */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for vectorizer's purposes -
   in tree-vect-loop-manip.c.  */
extern void vect_set_loop_condition (struct loop *, loop_vec_info,
				     tree, tree, tree, bool);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
						     struct loop *, edge);
extern void vect_loop_versioning (loop_vec_info, unsigned int, bool,
				  poly_uint64);
extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
				     tree *, tree *, tree *, int, bool, bool);
extern void vect_prepare_for_masked_peels (loop_vec_info);
extern dump_user_location_t find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
/* In tree-vect-stmts.c.  */
extern poly_uint64 current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_get_loop_mask_type (loop_vec_info);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
				stmt_vec_info * = NULL, gimple ** = NULL);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
				tree *, stmt_vec_info * = NULL,
				gimple ** = NULL);
extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
					    tree, tree, enum tree_code *,
					    enum tree_code *, int *,
					    vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
					     enum tree_code *,
					     int *, vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
				  enum vect_cost_for_stmt, stmt_vec_info,
				  int, enum vect_cost_model_location);
extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
						  gimple_stmt_iterator *);
extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_store_rhs (stmt_vec_info);
extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info, enum vect_def_type);
extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
			       vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (vec_info *,
					     vec<tree> *, vec<tree> *);
extern tree vect_init_vector (stmt_vec_info, tree, tree,
			      gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree);
extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
				 slp_tree, slp_instance);
extern void vect_remove_stores (stmt_vec_info);
extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree,
				     slp_instance, stmt_vector_for_cost *);
extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, tree, int, slp_tree,
				    stmt_vector_for_cost *);
extern void vect_get_load_cost (stmt_vec_info, int, bool,
				unsigned int *, unsigned int *,
				stmt_vector_for_cost *,
				stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (stmt_vec_info, int,
				 unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
extern void optimize_mask_stores (struct loop *);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
						  tree *);
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info);
/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
				   (dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
					   HOST_WIDE_INT *);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info,
						     unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
extern opt_result vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern opt_result vect_analyze_data_ref_accesses (vec_info *);
extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
				      signop, int, internal_fn *, tree *);
extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
				       gather_scatter_info *);
extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
						 vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *);
extern void vect_record_base_alignments (vec_info *);
extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree,
				      tree *, gimple_stmt_iterator *,
				      gimple **, bool, bool *,
				      tree = NULL_TREE, tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
			     stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern void vect_permute_store_chain (vec<tree>, unsigned int, stmt_vec_info,
				      gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
				    tree *, enum dr_alignment_support, tree,
				    struct loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree>, int,
					 gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
				   const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
						  tree, tree = NULL_TREE);
/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern stmt_vec_info vect_force_simple_reduction (loop_vec_info, stmt_vec_info,
						  bool *);
/* Used in gimple-loop-interchange.c.  */
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
				  enum tree_code);
/* Drive for loop analysis stage.  */
extern opt_loop_vec_info vect_analyze_loop (struct loop *,
					    loop_vec_info,
					    vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
					 tree *, bool);
extern tree vect_halve_mask_nunits (tree);
extern tree vect_double_mask_nunits (tree);
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
				   unsigned int, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
				unsigned int, tree, unsigned int);

/* Drive for loop transformation stage.  */
extern struct loop *vect_transform_loop (loop_vec_info);
extern opt_loop_vec_info vect_analyze_loop_form (struct loop *,
						 vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
					 slp_tree, int, stmt_vec_info *,
					 stmt_vector_for_cost *);
extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, slp_tree, slp_instance,
				    stmt_vector_for_cost *);
extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
				    stmt_vec_info *, slp_tree,
				    stmt_vector_for_cost *);
extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *,
					stmt_vector_for_cost *);
extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);
/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance, bool);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree>,
					  gimple_stmt_iterator *, poly_uint64,
					  slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern void vect_schedule_slp (vec_info *);
extern opt_result vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree>, slp_tree, vec<vec<tree> > *);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
					    unsigned int * = NULL,
					    tree * = NULL, tree * = NULL);
extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
				      unsigned int, vec<tree> &);
extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);
/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
void vect_pattern_recog (vec_info *);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
void vect_free_loop_info_assumptions (struct loop *);

#endif  /* GCC_TREE_VECTORIZER_H  */