/* Vectorizer
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      /   |
                |  \      \/      /    |
                |   \     /\     /     |
                |    \   /  \   /      |
         tree-vect-stmts.c  tree-vect-data-refs.c
                       \      /
                    tree-vect-patterns.c
*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple-pretty-print.h"
/* Loop or bb location, with hotness information.  */
dump_user_location_t vect_location;

/* Vector mapping GIMPLE stmt to stmt_vec_info.  */
vec<stmt_vec_info> *stmt_vec_info_vec;
/* Dump a cost entry according to args to F.  */

void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
		stmt_vec_info stmt_info, int misalign,
		enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    {
      print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
      fprintf (f, " ");
    }
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case scalar_stmt:
      ks = "scalar_stmt";
      break;
    case scalar_load:
      ks = "scalar_load";
      break;
    case scalar_store:
      ks = "scalar_store";
      break;
    case vector_stmt:
      ks = "vector_stmt";
      break;
    case vector_load:
      ks = "vector_load";
      break;
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_store:
      ks = "vector_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_perm:
      ks = "vec_perm";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}
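/* For illustration, one entry dumped by dump_stmt_cost looks roughly like
   this (the pointer value and the statement are invented for the example):

     0x3f2bc60 _4 = *p_2(D); 1 times unaligned_load (misalign 4) in body

   i.e. the target cost data pointer, the statement, the count, the cost
   kind (with the misalignment for the unaligned kinds), and where in the
   vectorized code the cost applies (prologue, body or epilogue).  */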
/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}
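/* Illustrative sketch (disabled), showing how the table is meant to be
   queried: lookups hash and compare the SIMDUID field only, per the
   hash/equal functions above.  HTAB and UID stand in for a populated
   table and a known DECL_UID; this mirrors the lookup done in
   adjust_simduid_builtins below.  */
#if 0
static poly_uint64
lookup_vf (hash_table<simduid_to_vf> *htab, unsigned int uid)
{
  simduid_to_vf *p = NULL, data;
  data.simduid = uid;
  p = htab->find (&data);
  /* A loop that was not vectorized keeps the default VF of 1.  */
  return p ? p->vf : 1;
}
#endif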
/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
			   const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
			      const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}
/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  poly_uint64 vf = 1;
	  enum internal_fn ifn;
	  gimple *stmt = gsi_stmt (i);
	  tree t;
	  if (!is_gimple_call (stmt)
	      || !gimple_call_internal_p (stmt))
	    {
	      gsi_next (&i);
	      continue;
	    }
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    case IFN_GOMP_SIMD_ORDERED_START:
	    case IFN_GOMP_SIMD_ORDERED_END:
	      if (integer_onep (gimple_call_arg (stmt, 0)))
		{
		  enum built_in_function bcode
		    = (ifn == IFN_GOMP_SIMD_ORDERED_START
		       ? BUILT_IN_GOMP_ORDERED_START
		       : BUILT_IN_GOMP_ORDERED_END);
		  gimple *g
		    = gimple_build_call (builtin_decl_explicit (bcode), 0);
		  tree vdef = gimple_vdef (stmt);
		  gimple_set_vdef (g, vdef);
		  SSA_NAME_DEF_STMT (vdef) = g;
		  gimple_set_vuse (g, gimple_vuse (stmt));
		  gsi_replace (&i, g, true);
		  continue;
		}
	      gsi_remove (&i, true);
	      unlink_stmt_vdef (stmt);
	      continue;
	    default:
	      gsi_next (&i);
	      continue;
	    }
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  /* Need to nullify loop safelen field since its value is not
	     valid after transformation.  */
	  if (bb->loop_father && bb->loop_father->safelen > 0)
	    bb->loop_father->safelen = 0;
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree lhs = gimple_call_lhs (stmt);
	  if (lhs)
	    replace_uses_by (lhs, t);
	  release_defs (stmt);
	  gsi_remove (&i, true);
	}
    }
}
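/* As an example (GIMPLE sketch, SSA names invented), for a simd loop
   whose simduid.0 was vectorized with a factor of 8, the folding above
   rewrites

     _5 = GOMP_SIMD_VF (simduid.0_1);            ->  uses replaced by 8
     _7 = GOMP_SIMD_LANE (simduid.0_1);          ->  uses replaced by 0
     _9 = GOMP_SIMD_LAST_LANE (simduid.0_1, _8); ->  uses replaced by _8

   while GOMP_SIMD_ORDERED_{START,END} calls either become real
   GOMP_ordered_{start,end} calls (when their argument is 1) or are
   simply removed.  */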
/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
	   && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
	   && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
	*ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
	{
	  simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
	  *p = data;
	  *slot = p;
	}
      else if ((*slot)->simduid != ns->simduid)
	(*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
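/* Note the -1U case above: if the same "omp simd array", say D.1737, is
   reached from the results of two GOMP_SIMD_LANE calls with distinct
   simduids, its entry is poisoned with simduid -1U, and
   shrink_simd_arrays below leaves such arrays alone.  */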
/* Find "omp simd array" temporaries and map them to the corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	  continue;
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	  case IFN_GOMP_SIMD_VF:
	  case IFN_GOMP_SIMD_LAST_LANE:
	    break;
	  default:
	    continue;
	  }
	tree lhs = gimple_call_lhs (stmt);
	if (lhs == NULL_TREE)
	  continue;
	imm_use_iterator use_iter;
	gimple *use_stmt;
	ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
	FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
	  if (!is_gimple_debug (use_stmt))
	    walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
	 = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
	tree decl = (*iter)->decl;
	poly_uint64 vf = 1;
	if (simduid_to_vf_htab)
	  {
	    simduid_to_vf *p = NULL, data;
	    data.simduid = (*iter)->simduid;
	    p = simduid_to_vf_htab->find (&data);
	    if (p)
	      vf = p->vf;
	  }
	tree atype
	  = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
	TREE_TYPE (decl) = atype;
	relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}
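/* For example (sizes invented): an "omp simd array" temporary declared
   as D.1737[16] to accommodate the maximal safelen is re-laid out as
   D.1737[8] when its loop was vectorized with a factor of 8; arrays
   whose loop was not vectorized shrink to the default VF of 1, i.e. a
   single element.  */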
/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
		    vec_info_shared *shared_)
  : kind (kind_in),
    shared (shared_),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
  set_stmt_vec_info_vec (&stmt_vec_infos);
}

vec_info::~vec_info ()
{
  slp_instance instance;
  unsigned int i;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);

  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos (&stmt_vec_infos);
}

vec_info_shared::vec_info_shared ()
  : datarefs (vNULL),
    datarefs_copy (vNULL),
    ddrs (vNULL)
{
}

vec_info_shared::~vec_info_shared ()
{
  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  datarefs_copy.release ();
}

void
vec_info_shared::save_datarefs ()
{
  if (!flag_checking)
    return;
  datarefs_copy.reserve_exact (datarefs.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    datarefs_copy.quick_push (*datarefs[i]);
}

void
vec_info_shared::check_datarefs ()
{
  if (!flag_checking)
    return;
  gcc_assert (datarefs.length () == datarefs_copy.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
      gcc_unreachable ();
}
/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */

void
vect_free_loop_info_assumptions (struct loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since it is
     used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}
/* Return whether STMT is inside the region we try to vectorize.  */

bool
vect_stmt_in_region_p (vec_info *vinfo, gimple *stmt)
{
  if (!gimple_bb (stmt))
    return false;

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
      if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
	return false;
    }
  else
    {
      bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
      if (gimple_bb (stmt) != BB_VINFO_BB (bb_vinfo)
	  || gimple_uid (stmt) == -1U
	  || gimple_code (stmt) == GIMPLE_PHI)
	return false;
    }

  return true;
}
/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */

static gimple *
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
	break;
      if (!single_pred_p (bb))
	break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
	{
	  g = gsi_stmt (gsi);
	  if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
	      && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
		  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
	    return g;
	}
    }
  return NULL;
}
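/* The shape being matched, as left by if-conversion (GIMPLE sketch, SSA
   names invented):

     _1 = .LOOP_VECTORIZED (1, 3);
     if (_1 != 0)
       goto <bb of loop 1>;	// version to be vectorized
     else
       goto <bb of loop 3>;	// scalar, if-converted copy

   i.e. the internal call is the statement immediately preceding the
   GIMPLE_COND, and one of its two arguments is LOOP's number.  */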
/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */

static gimple *
vect_loop_dist_alias_call (struct loop *loop)
{
  basic_block bb;
  basic_block entry;
  struct loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
	continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
	continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
	 alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
	  && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
	return g;
    }

  return NULL;
}
/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if converted.  */

static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
		       == loop_vectorized_call);
  /* If we are going to vectorize outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so it is a wasted work, or is used only for
     versioning.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
	{
	  arg = gimple_call_arg (g, 0);
	  get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
	  fold_loop_internal_call (g, boolean_false_node);
	}
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *phi = gsi_stmt (gsi);
	  gimple_set_uid (phi, 0);
	}
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, 0);
	}
    }
  free (bbs);
}
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		      unsigned *num_vectorized_loops,
		      loop_p loop, loop_vec_info orig_loop_vinfo,
		      gimple *loop_vectorized_call,
		      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vec_info_shared shared;
  vect_location = find_loop_location (loop);
  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
		 LOCATION_FILE (vect_location.get_location_t ()),
		 LOCATION_LINE (vect_location.get_location_t ()));

  loop_vec_info loop_vinfo = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
  loop->aux = loop_vinfo;

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
	 BB of innermost loops.
	 ???  Ideally BB vectorization would learn to vectorize
	 control flow by applying if-conversion on-the-fly, the
	 following retains the if-converted loop body even when
	 only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
	  && loop_vectorized_call
	  && ! loop->inner)
	{
	  basic_block bb = loop->header;
	  bool has_mask_load_store = false;
	  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	       !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      if (is_gimple_call (stmt)
		  && gimple_call_internal_p (stmt)
		  && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
		      || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
		{
		  has_mask_load_store = true;
		  break;
		}
	      gimple_set_uid (stmt, -1);
	      gimple_set_visited (stmt, false);
	    }
	  if (! has_mask_load_store && vect_slp_bb (bb))
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "basic block vectorized\n");
	      fold_loop_internal_call (loop_vectorized_call,
				       boolean_true_node);
	      loop_vectorized_call = NULL;
	      ret |= TODO_cleanup_cfg;
	    }
	}
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
	 loop, don't vectorize its inner loop; we'll attempt to
	 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
	 loop version.  */
      if (loop_vectorized_call && loop->inner)
	loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);

  unsigned HOST_WIDE_INT bytes;
  if (current_vector_size.is_constant (&bytes))
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
		     "loop vectorized using "
		     HOST_WIDE_INT_PRINT_UNSIGNED " byte "
		     "vectors\n", bytes);
  else
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
		     "loop vectorized using variable length vectors\n");

  loop_p new_loop = vect_transform_loop (loop_vinfo);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
	simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
	= simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
				 new_loop, loop_vinfo, NULL, NULL);

  return ret;
}
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
	 && optimize_loop_nest_for_speed_p (loop))
	|| loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
			       loop, NULL,
			       vect_loop_vectorized_call (loop),
			       vect_loop_dist_alias_call (loop));
}
/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  set_stmt_vec_info_vec (NULL);

  /* ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
	any_ifcvt_loops = true;
	/* If-conversion sometimes versions both the outer loop
	   (for the case when outer loop vectorization might be
	   desirable) as well as the inner loop in the scalar version
	   of the loop.  So we have:
	    if (LOOP_VECTORIZED (1, 3))
	      {
		loop1
		  loop2
	      }
	    else
	      loop3 (copy of loop1)
		if (LOOP_VECTORIZED (4, 5))
		  loop4 (copy of loop2)
		else
		  loop5 (copy of loop4)
	   If FOR_EACH_LOOP gives us loop3 first (which has
	   dont_vectorize set), make sure to process loop1 before loop4;
	   so that we can prevent vectorization of loop4 if loop1
	   is successfully vectorized.  */
	if (loop->inner)
	  {
	    gimple *loop_vectorized_call
	      = vect_loop_vectorized_call (loop);
	    if (loop_vectorized_call
		&& vect_loop_vectorized_call (loop->inner))
	      {
		tree arg = gimple_call_arg (loop_vectorized_call, 0);
		struct loop *vector_loop
		  = get_loop (cfun, tree_to_shwi (arg));
		if (vector_loop && vector_loop != loop)
		  {
		    /* Make sure we don't vectorize it twice.  */
		    vector_loop->dont_vectorize = true;
		    ret |= try_vectorize_loop (simduid_to_vf_htab,
					       &num_vectorized_loops,
					       vector_loop);
		  }
	      }
	  }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
				 loop);

  vect_location = dump_user_location_t ();

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vectorized %u loops in function.\n",
		     num_vectorized_loops);

  /* ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < number_of_loops (cfun); i++)
      {
	loop = get_loop (cfun, i);
	if (loop && loop->dont_vectorize)
	  {
	    gimple *g = vect_loop_vectorized_call (loop);
	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
		g = NULL;
	      }
	    else
	      g = vect_loop_dist_alias_call (loop);

	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
	      }
	  }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
	continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
	  && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
	optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
	 ???  Also while we try hard to update loop-closed SSA form we fail
	 to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}
/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);

  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}
/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, -1);
	  gimple_set_visited (stmt, false);
	}
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
	dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}
/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;

/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}

/* Return alignment of field having maximum alignment of vector type
   corresponding to its scalar type.  For now, we only consider fields whose
   offset is a multiple of its vector alignment.
   0 if no suitable field is found.  */
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
	  || DECL_USER_ALIGN (field)
	  || DECL_ARTIFICIAL (field))
	continue;

      /* We don't need to process the type further if offset is variable,
	 since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
	  || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
	break;

      /* Similarly stop processing the type if offset_tree
	 does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
	break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
	 whose offset is a multiple of the vector alignment.  */
      if (alignment
	  && (offset % alignment == 0)
	  && (alignment > max_align))
	max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}

/* Return alignment of vector type corresponding to decl's scalar type
   or 0 if it doesn't exist or the vector alignment is less than
   decl's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}
/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = dump_user_location_t ();
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
	   && !symtab_node::get (decl)->can_increase_alignment_p ())
	  || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
	continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
	{
	  vnode->increase_alignment (alignment);
	  dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
	  dump_printf (MSG_NOTE, "\n");
	}
    }

  delete type_align_map;
  return 0;
}
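/* For instance (illustrative and target-dependent): with 16-byte vectors,
   a global "int a[256];" has vector type V4SI whose TYPE_ALIGN is 128
   bits, so when the target permits it the pass raises the alignment of
   "a" from 32 to 128 bits, allowing the vectorizer to use aligned
   accesses to it.  */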
namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_section_anchors && flag_tree_loop_vectorize;
    }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}