/* Vectorizer
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer),
  (3) BB vectorizer (out-of-loops), aka SLP.

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These
    are used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used
    by drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all).

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      /   |
                |  \      \/      /    |
                |   \     /\     /     |
                |    \   /  \   /      |
         tree-vect-stmts.c  tree-vect-data-refs.c
*/
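/* As a rough illustration (example loops, not from this file): the loop
   vectorizer exploits inter-iteration parallelism,

       for (i = 0; i < n; i++)
         a[i] = b[i] + c[i];

   where one vector operation covers VF consecutive iterations, whereas
   BB SLP exploits intra-iteration parallelism in straight-line code,

       a[0] = b[0] + c[0];
       a[1] = b[1] + c[1];
       a[2] = b[2] + c[2];
       a[3] = b[3] + c[3];

   where the four independent scalar adds become a single vector add.  */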
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
/* Loop or bb location.  */
source_location vect_location;

/* Vector mapping GIMPLE stmt to stmt_vec_info.  */
vec<stmt_vec_info> stmt_vec_info_vec;
/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
  unsigned int simduid;
  int vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}
/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */
struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
                           const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
                              const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}
/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */
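/* For illustration (hypothetical GIMPLE; assumes a vectorization factor
   of 4 was recorded for simduid.0 in HTAB):

       _7 = GOMP_SIMD_VF (simduid.0);    becomes   _7 = 4;
       _8 = GOMP_SIMD_LANE (simduid.0);  becomes   _8 = 0;

   i.e. once the simd loop is vectorized (or left scalar, VF == 1), the
   per-lane bookkeeping collapses to compile-time constants.  */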
static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
        {
          unsigned int vf = 1;
          enum internal_fn ifn;
          gimple *stmt = gsi_stmt (i);
          tree t;
          if (!is_gimple_call (stmt)
              || !gimple_call_internal_p (stmt))
            {
              gsi_next (&i);
              continue;
            }
          ifn = gimple_call_internal_fn (stmt);
          switch (ifn)
            {
            case IFN_GOMP_SIMD_LANE:
            case IFN_GOMP_SIMD_VF:
            case IFN_GOMP_SIMD_LAST_LANE:
              break;
            case IFN_GOMP_SIMD_ORDERED_START:
            case IFN_GOMP_SIMD_ORDERED_END:
              if (integer_onep (gimple_call_arg (stmt, 0)))
                {
                  enum built_in_function bcode
                    = (ifn == IFN_GOMP_SIMD_ORDERED_START
                       ? BUILT_IN_GOMP_ORDERED_START
                       : BUILT_IN_GOMP_ORDERED_END);
                  gimple *g
                    = gimple_build_call (builtin_decl_explicit (bcode), 0);
                  tree vdef = gimple_vdef (stmt);
                  gimple_set_vdef (g, vdef);
                  SSA_NAME_DEF_STMT (vdef) = g;
                  gimple_set_vuse (g, gimple_vuse (stmt));
                  gsi_replace (&i, g, true);
                  continue;
                }
              gsi_remove (&i, true);
              unlink_stmt_vdef (stmt);
              continue;
            default:
              gsi_next (&i);
              continue;
            }
          tree arg = gimple_call_arg (stmt, 0);
          gcc_assert (arg != NULL_TREE);
          gcc_assert (TREE_CODE (arg) == SSA_NAME);
          simduid_to_vf *p = NULL, data;
          data.simduid = DECL_UID (SSA_NAME_VAR (arg));
          if (htab)
            {
              p = htab->find (&data);
              if (p)
                vf = p->vf;
            }
          switch (ifn)
            {
            case IFN_GOMP_SIMD_VF:
              t = build_int_cst (unsigned_type_node, vf);
              break;
            case IFN_GOMP_SIMD_LANE:
              t = build_int_cst (unsigned_type_node, 0);
              break;
            case IFN_GOMP_SIMD_LAST_LANE:
              t = gimple_call_arg (stmt, 1);
              break;
            default:
              gcc_unreachable ();
            }
          update_call_from_tree (&i, t);
          gsi_next (&i);
        }
    }
}
/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};
/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
           && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
           && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
        *ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
        {
          simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
          *p = data;
          *slot = p;
        }
      else if ((*slot)->simduid != ns->simduid)
        (*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
/* Find "omp simd array" temporaries and map them to the corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple *stmt = gsi_stmt (gsi);
        if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
          continue;
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
          case IFN_GOMP_SIMD_VF:
          case IFN_GOMP_SIMD_LAST_LANE:
            break;
          default:
            continue;
          }
        tree lhs = gimple_call_lhs (stmt);
        if (lhs == NULL_TREE)
          continue;
        imm_use_iterator use_iter;
        gimple *use_stmt;
        ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
        FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
          if (!is_gimple_debug (use_stmt))
            walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */
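/* For example (hypothetical sizes): an "omp simd array" temporary
   D.1737[] created by omp-low.c would, after its loop was vectorized
   with VF == 8, be re-laid out as an 8-element array; if the loop was
   never vectorized, the recorded factor stays 1 and the array shrinks
   to a single element.  */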
static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
         = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
        tree decl = (*iter)->decl;
        int vf = 1;
        if (simduid_to_vf_htab)
          {
            simduid_to_vf *p = NULL, data;
            data.simduid = (*iter)->simduid;
            p = simduid_to_vf_htab->find (&data);
            if (p)
              vf = p->vf;
          }
        tree atype
          = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
        TREE_TYPE (decl) = atype;
        relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}
/* A helper function to free data refs.  */

void
vect_destroy_datarefs (vec_info *vinfo)
{
  struct data_reference *dr;
  unsigned int i;

  FOR_EACH_VEC_ELT (vinfo->datarefs, i, dr)
    if (dr->aux)
      {
        free (dr->aux);
        dr->aux = NULL;
      }

  free_data_refs (vinfo->datarefs);
}
/* Return whether STMT is inside the region we try to vectorize.  */

bool
vect_stmt_in_region_p (vec_info *vinfo, gimple *stmt)
{
  if (!gimple_bb (stmt))
    return false;

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
      if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
        return false;
    }
  else
    {
      bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
      if (gimple_bb (stmt) != BB_VINFO_BB (bb_vinfo)
          || gimple_uid (stmt) == -1U
          || gimple_code (stmt) == GIMPLE_PHI)
        return false;
    }

  return true;
}
/* If LOOP has been versioned during if-conversion, return the internal
   call guarding it.  */
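/* The versioning produced by if-conversion looks roughly like this
   (loop numbers are illustrative):

       if (LOOP_VECTORIZED (1, 2))
         ... loop 1: if-converted copy, candidate for vectorization ...
       else
         ... loop 2: original scalar loop ...

   so the guarding call is looked up by walking up from LOOP's
   preheader.  */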
static gimple *
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
        break;
      if (!single_pred_p (bb))
        break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
        {
          g = gsi_stmt (gsi);
          if (is_gimple_call (g)
              && gimple_call_internal_p (g)
              && gimple_call_internal_fn (g) == IFN_LOOP_VECTORIZED
              && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
                  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
            return g;
        }
    }
  return NULL;
}
/* Fold LOOP_VECTORIZED internal call G to VALUE and
   update any immediate uses of its LHS.  */
static void
fold_loop_vectorized_call (gimple *g, tree value)
{
  tree lhs = gimple_call_lhs (g);
  use_operand_p use_p;
  imm_use_iterator iter;
  gimple *use_stmt;
  gimple_stmt_iterator gsi = gsi_for_stmt (g);

  update_call_from_tree (&gsi, value);
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
        SET_USE (use_p, value);
      update_stmt (use_stmt);
    }
}
/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if-converted.  */
static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call
                       (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
                       == loop_vectorized_call);
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *phi = gsi_stmt (gsi);
          gimple_set_uid (phi, 0);
        }
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, 0);
        }
    }
  free (bbs);
}
/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */
unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  init_stmt_vec_info_vec ();
  /*  ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      any_ifcvt_loops = true;
    else if ((flag_tree_loop_vectorize
              && optimize_loop_nest_for_speed_p (loop))
             || loop->force_vectorize)
      {
        loop_vec_info loop_vinfo;
        vect_location = find_loop_location (loop);
        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
          dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
                       LOCATION_FILE (vect_location),
                       LOCATION_LINE (vect_location));

        loop_vinfo = vect_analyze_loop (loop);
        loop->aux = loop_vinfo;

        if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
          continue;

        if (!dbg_cnt (vect_loop))
          {
            /* We may miss some if-converted loops due to
               debug counter.  Set any_ifcvt_loops to visit
               them at finalization.  */
            any_ifcvt_loops = true;
            break;
          }

        gimple *loop_vectorized_call = vect_loop_vectorized_call (loop);
        if (loop_vectorized_call)
          set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                           "loop vectorized\n");
        vect_transform_loop (loop_vinfo);
        num_vectorized_loops++;
        /* Now that the loop has been vectorized, allow it to be unrolled
           etc.  */
        loop->force_vectorize = false;

        if (loop->simduid)
          {
            simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
            if (!simduid_to_vf_htab)
              simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
            simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
            simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
            *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
              = simduid_to_vf_data;
          }

        if (loop_vectorized_call)
          {
            fold_loop_vectorized_call (loop_vectorized_call,
                                       boolean_true_node);
            ret |= TODO_cleanup_cfg;
          }
      }
  vect_location = UNKNOWN_LOCATION;

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorized %u loops in function.\n",
                     num_vectorized_loops);
  /*  ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < vect_loops_num; i++)
      {
        loop = get_loop (cfun, i);
        if (loop && loop->dont_vectorize)
          {
            gimple *g = vect_loop_vectorized_call (loop);
            if (g)
              {
                fold_loop_vectorized_call (g, boolean_false_node);
                ret |= TODO_cleanup_cfg;
              }
          }
      }

  for (i = 1; i < vect_loops_num; i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop)
        continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = false;
      if (loop_vinfo)
        has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      destroy_loop_vec_info (loop_vinfo, true);
      if (has_mask_store)
        optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  free_stmt_vec_info_vec ();
  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
         ??? Also while we try hard to update loop-closed SSA form we fail
         to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}
/*  Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup
unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}
/*  Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize
unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, -1);
          gimple_set_visited (stmt, false);
        }
    }

  init_stmt_vec_info_vec ();

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                         "basic block vectorized\n");
    }

  free_stmt_vec_info_vec ();

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}
} // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}
/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
     - Consider also structs that have an array field.
     - Use ipa analysis to prune arrays that can't be vectorized?
       This should involve global alignment analysis and in the future also
       array padding.  */
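/* For example (illustrative, target-dependent numbers): on a target
   with 256-bit vectors, a global

       static double a[1024];

   with default 8- or 16-byte alignment would be raised to 32-byte
   alignment, so the vectorizer can use aligned vector loads and
   stores on it.  */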
static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;
/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
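/* E.g., assuming a target where V4SF exists with TYPE_ALIGN of 128
   bits: for float[8] the returned alignment is 128; for float[2] the
   whole array is smaller than the vector type, so 0 is returned.  */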
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);

  tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
  if (!vectype
      || !TYPE_SIZE (type)
      || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
      || tree_int_cst_lt (TYPE_SIZE (type), TYPE_SIZE (vectype)))
    return 0;

  return TYPE_ALIGN (vectype);
}
/* Return alignment of the field having maximum alignment of vector type
   corresponding to its scalar type.  For now, we only consider fields whose
   offset is a multiple of its vector alignment.
   0 if no suitable field is found.  */
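/* E.g., assuming the 128-bit V4SF alignment from above, in

       struct s { float f[8]; double d; };

   field f sits at bit offset 0, a multiple of its 128-bit vector
   alignment, so it qualifies and the function would return 128.  */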
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
          || DECL_USER_ALIGN (field)
          || DECL_ARTIFICIAL (field))
        continue;

      /* We don't need to process the type further if offset is variable,
         since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
          || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
        break;

      /* Similarly stop processing the type if offset_tree
         does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
        break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
         whose offset is a multiple of the vector alignment.  */
      if (alignment
          && (offset % alignment == 0)
          && (alignment > max_align))
        max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}
/* Return alignment of the vector type corresponding to TYPE's scalar type,
   or 0 if it doesn't exist or the vector alignment is less than
   TYPE's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  static unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}
/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = UNKNOWN_LOCATION;
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
           && !symtab_node::get (decl)->can_increase_alignment_p ())
          || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
        continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
        {
          vnode->increase_alignment (alignment);
          dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  delete type_align_map;
  return 0;
}
namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_section_anchors && flag_tree_loop_vectorize;
    }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment
} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}