/* Vectorizer
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.
    These are used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities,
    used by drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      / |
                |  \      \/      /  |
                |   \     /\     /   |
                |    \   /  \   /    |
         tree-vect-stmts.c  tree-vect-data-refs.c
                  \      /
               tree-vect-patterns.c
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple-pretty-print.h"
#include "opt-problem.h"
#include "internal-fn.h"

/* Loop or bb location, with hotness information.  */
dump_user_location_t vect_location;

/* Dump a cost entry according to args to F.  */

void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
		stmt_vec_info stmt_info, int misalign, unsigned cost,
		enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    {
      print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
      fprintf (f, " ");
    }
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case scalar_stmt:
      ks = "scalar_stmt";
      break;
    case scalar_load:
      ks = "scalar_load";
      break;
    case scalar_store:
      ks = "scalar_store";
      break;
    case vector_stmt:
      ks = "vector_stmt";
      break;
    case vector_load:
      ks = "vector_load";
      break;
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_store:
      ks = "vector_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_perm:
      ks = "vec_perm";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  fprintf (f, "costs %u ", cost);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}
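
/* As an illustration only (the pointer and statement below are made up,
   and the exact statement rendering depends on print_gimple_expr), a line
   written by dump_stmt_cost might look like:

     0x2f9e340 _4 = a[i_1] 1 times scalar_load costs 1 in body  */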

/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}

/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  For example,

	_7 = GOMP_SIMD_LANE (simduid.0)
	...
	...
	D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
			   const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
			      const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}

/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */
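
/* For example (illustrative GIMPLE; assume HTAB records a vectorization
   factor of 8 for simduid.0), the function below takes

     _5 = GOMP_SIMD_VF (simduid.0);
     _7 = GOMP_SIMD_LANE (simduid.0);

   replaces all uses of _5 with the constant 8 and all uses of _7 with
   the constant 0, and then deletes both internal calls.  */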

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  poly_uint64 vf = 1;
	  enum internal_fn ifn;
	  gimple *stmt = gsi_stmt (i);
	  tree t;
	  if (!is_gimple_call (stmt)
	      || !gimple_call_internal_p (stmt))
	    {
	      gsi_next (&i);
	      continue;
	    }
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    case IFN_GOMP_SIMD_ORDERED_START:
	    case IFN_GOMP_SIMD_ORDERED_END:
	      if (integer_onep (gimple_call_arg (stmt, 0)))
		{
		  enum built_in_function bcode
		    = (ifn == IFN_GOMP_SIMD_ORDERED_START
		       ? BUILT_IN_GOMP_ORDERED_START
		       : BUILT_IN_GOMP_ORDERED_END);
		  gimple *g
		    = gimple_build_call (builtin_decl_explicit (bcode), 0);
		  tree vdef = gimple_vdef (stmt);
		  gimple_set_vdef (g, vdef);
		  SSA_NAME_DEF_STMT (vdef) = g;
		  gimple_set_vuse (g, gimple_vuse (stmt));
		  gsi_replace (&i, g, true);
		  continue;
		}
	      gsi_remove (&i, true);
	      unlink_stmt_vdef (stmt);
	      continue;
	    default:
	      gsi_next (&i);
	      continue;
	    }
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  /* Need to nullify loop safelen field since its value is not
	     valid after transformation.  */
	  if (bb->loop_father && bb->loop_father->safelen > 0)
	    bb->loop_father->safelen = 0;
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree lhs = gimple_call_lhs (stmt);
	  if (lhs)
	    replace_uses_by (lhs, t);
	  release_defs (stmt);
	  gsi_remove (&i, true);
	}
    }
}

/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
	   && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
	   && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
	*ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
	{
	  simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
	  *p = data;
	  *slot = p;
	}
      else if ((*slot)->simduid != ns->simduid)
	(*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}

/* Find "omp simd array" temporaries and map them to corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	  continue;
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	  case IFN_GOMP_SIMD_VF:
	  case IFN_GOMP_SIMD_LAST_LANE:
	    break;
	  default:
	    continue;
	  }
	tree lhs = gimple_call_lhs (stmt);
	if (lhs == NULL_TREE)
	  continue;
	imm_use_iterator use_iter;
	gimple *use_stmt;
	ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
	FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
	  if (!is_gimple_debug (use_stmt))
	    walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}

/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */
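
/* For instance (illustrative; assume a recorded vectorization factor
   of 8), an OMP simd array temporary that was created as

     int D.1737[16];

   is given the type int[8] by the function below and re-laid out,
   since only VF lanes are indexed once the loop is vectorized.  */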

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
	 = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
	tree decl = (*iter)->decl;
	poly_uint64 vf = 1;
	if (simduid_to_vf_htab)
	  {
	    simduid_to_vf *p = NULL, data;
	    data.simduid = (*iter)->simduid;
	    p = simduid_to_vf_htab->find (&data);
	    if (p)
	      vf = p->vf;
	  }
	tree atype
	  = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
	TREE_TYPE (decl) = atype;
	relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}

/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
		    vec_info_shared *shared_)
  : kind (kind_in),
    shared (shared_),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
}

vec_info::~vec_info ()
{
  slp_instance instance;
  unsigned int i;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance, true);

  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos ();
}

vec_info_shared::vec_info_shared ()
  : datarefs (vNULL),
    datarefs_copy (vNULL),
    ddrs (vNULL)
{
}

vec_info_shared::~vec_info_shared ()
{
  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  datarefs_copy.release ();
}

void
vec_info_shared::save_datarefs ()
{
  if (!flag_checking)
    return;
  datarefs_copy.reserve_exact (datarefs.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    datarefs_copy.quick_push (*datarefs[i]);
}

void
vec_info_shared::check_datarefs ()
{
  if (!flag_checking)
    return;
  gcc_assert (datarefs.length () == datarefs_copy.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
      gcc_unreachable ();
}

/* Record that STMT belongs to the vectorizable region.  Create and return
   an associated stmt_vec_info.  */

stmt_vec_info
vec_info::add_stmt (gimple *stmt)
{
  stmt_vec_info res = new_stmt_vec_info (stmt);
  set_vinfo_for_stmt (stmt, res);
  return res;
}

/* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
   return null.  It is safe to call this function on any statement, even if
   it might not be part of the vectorizable region.  */

stmt_vec_info
vec_info::lookup_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
    {
      stmt_vec_info res = stmt_vec_infos[uid - 1];
      if (res && res->stmt == stmt)
	return res;
    }
  return NULL;
}

/* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
   return that stmt_vec_info, otherwise return null.  It is safe to call
   this on arbitrary operands.  */

stmt_vec_info
vec_info::lookup_def (tree name)
{
  if (TREE_CODE (name) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (name))
    return lookup_stmt (SSA_NAME_DEF_STMT (name));
  return NULL;
}

/* See whether there is a single non-debug statement that uses LHS and
   whether that statement has an associated stmt_vec_info.  Return the
   stmt_vec_info if so, otherwise return null.  */

stmt_vec_info
vec_info::lookup_single_use (tree lhs)
{
  use_operand_p dummy;
  gimple *use_stmt;
  if (single_imm_use (lhs, &dummy, &use_stmt))
    return lookup_stmt (use_stmt);
  return NULL;
}

/* Return vectorization information about DR.  */

dr_vec_info *
vec_info::lookup_dr (data_reference *dr)
{
  stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
  /* DR_STMT should never refer to a stmt in a pattern replacement.  */
  gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
  return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
}

/* Record that NEW_STMT_INFO now implements the same data reference
   as OLD_STMT_INFO.  */

void
vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
{
  gcc_assert (!is_pattern_stmt_p (old_stmt_info));
  STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
  new_stmt_info->dr_aux = old_stmt_info->dr_aux;
  STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
    = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
  STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
    = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
}

/* Permanently remove the statement described by STMT_INFO from the
   function.  */

void
vec_info::remove_stmt (stmt_vec_info stmt_info)
{
  gcc_assert (!stmt_info->pattern_stmt_p);
  set_vinfo_for_stmt (stmt_info->stmt, NULL);
  gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
  unlink_stmt_vdef (stmt_info->stmt);
  gsi_remove (&si, true);
  release_defs (stmt_info->stmt);
  free_stmt_vec_info (stmt_info);
}

/* Replace the statement at GSI by NEW_STMT, both the vectorization
   information and the function itself.  STMT_INFO describes the statement
   at GSI.  */

void
vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
			gimple *new_stmt)
{
  gimple *old_stmt = stmt_info->stmt;
  gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
  set_vinfo_for_stmt (old_stmt, NULL);
  set_vinfo_for_stmt (new_stmt, stmt_info);
  stmt_info->stmt = new_stmt;
  gsi_replace (gsi, new_stmt, true);
}

/* Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info res = XCNEW (struct _stmt_vec_info);
  res->vinfo = this;
  res->stmt = stmt;

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;

  /* This is really "uninitialized" until vect_compute_data_ref_alignment.  */
  res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;

  return res;
}

/* Associate STMT with INFO.  */

void
vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_checking_assert (info);
      uid = stmt_vec_infos.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_infos.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_infos[uid - 1] = info;
    }
}
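
/* Note the convention shared by lookup_stmt and set_vinfo_for_stmt above:
   a gimple uid of 0 means "no stmt_vec_info", while a nonzero uid is
   1 + the index into stmt_vec_infos.  Hence a statement is registered at
   most once, and only an already-registered statement may have its info
   cleared.  */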

/* Free the contents of stmt_vec_infos.  */

void
vec_info::free_stmt_vec_infos (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
    if (info != NULL)
      free_stmt_vec_info (info);
  stmt_vec_infos.release ();
}

/* Free STMT_INFO.  */

void
vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
{
  if (stmt_info->pattern_stmt_p)
    {
      gimple_set_bb (stmt_info->stmt, NULL);
      tree lhs = gimple_get_lhs (stmt_info->stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	release_ssa_name (lhs);
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  free (stmt_info);
}

/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */

void
vect_free_loop_info_assumptions (struct loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since it is
     used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}

/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */
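
/* The versioned CFG produced by if-conversion has this shape
   (illustrative GIMPLE; internal calls are dumped with a leading dot):

     _1 = .LOOP_VECTORIZED (1, 2);
     if (_1 != 0)
       <loop 1, the if-converted copy>
     else
       <loop 2, the scalar copy>

   so the function below walks up from LOOP's preheader to the versioning
   GIMPLE_COND and inspects the internal call immediately preceding it.  */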

static gimple *
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
	break;
      if (!single_pred_p (bb))
	break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
	{
	  g = gsi_stmt (gsi);
	  if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
	      && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
		  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
	    return g;
	}
    }
  return NULL;
}

/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */
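
/* Loop distribution versions loops in a similar way (illustrative):

     _2 = .LOOP_DIST_ALIAS (<orig loop num>, <runtime alias test>);
     if (_2 != 0)
       <distributed copies>
     else
       <original loop>

   The call is found by walking up the dominator tree from LOOP and
   matching the distribution alias id against loop->orig_loop_num.  */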

static gimple *
vect_loop_dist_alias_call (struct loop *loop)
{
  basic_block bb;
  basic_block entry;
  struct loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
	continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
	continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
	 alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
	  && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
	return g;
    }

  return NULL;
}

/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if converted.  */

static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
		       == loop_vectorized_call);
  /* If we are going to vectorize the outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so vectorizing it is wasted work, or it is used only
     for a few iterations.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
	{
	  arg = gimple_call_arg (g, 0);
	  get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
	  fold_loop_internal_call (g, boolean_false_node);
	}
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *phi = gsi_stmt (gsi);
	  gimple_set_uid (phi, 0);
	}
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, 0);
	}
    }
  free (bbs);
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		      unsigned *num_vectorized_loops,
		      loop_p loop, loop_vec_info orig_loop_vinfo,
		      gimple *loop_vectorized_call,
		      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vec_info_shared shared;
  vect_location = find_loop_location (loop);
  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
		 "\nAnalyzing loop at %s:%d\n",
		 LOCATION_FILE (vect_location.get_location_t ()),
		 LOCATION_LINE (vect_location.get_location_t ()));

  /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p.  */
  opt_loop_vec_info loop_vinfo
    = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
  loop->aux = loop_vinfo;

  if (!loop_vinfo)
    if (dump_enabled_p ())
      if (opt_problem *problem = loop_vinfo.get_problem ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "couldn't vectorize loop\n");
	  problem->emit_and_clear ();
	}

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
	 BB of innermost loops.
	 ??? Ideally BB vectorization would learn to vectorize
	 control flow by applying if-conversion on-the-fly, the
	 following retains the if-converted loop body even when
	 only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
	  && loop_vectorized_call
	  && ! loop->inner)
	{
	  basic_block bb = loop->header;
	  bool require_loop_vectorize = false;
	  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	       !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      gcall *call = dyn_cast <gcall *> (stmt);
	      if (call && gimple_call_internal_p (call))
		{
		  internal_fn ifn = gimple_call_internal_fn (call);
		  if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
		      /* Don't keep the if-converted parts when the ifn with
			 specific type is not supported by the backend.  */
		      || (direct_internal_fn_p (ifn)
			  && !direct_internal_fn_supported_p
			       (call, OPTIMIZE_FOR_SPEED)))
		    {
		      require_loop_vectorize = true;
		      break;
		    }
		}
	      gimple_set_uid (stmt, -1);
	      gimple_set_visited (stmt, false);
	    }
	  if (!require_loop_vectorize && vect_slp_bb (bb))
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "basic block vectorized\n");
	      fold_loop_internal_call (loop_vectorized_call,
				       boolean_true_node);
	      loop_vectorized_call = NULL;
	      ret |= TODO_cleanup_cfg;
	    }
	}
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
	 loop, don't vectorize its inner loop; we'll attempt to
	 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
	 loop version.  */
      if (loop_vectorized_call && loop->inner)
	loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);

  unsigned HOST_WIDE_INT bytes;
  if (current_vector_size.is_constant (&bytes))
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
		     "loop vectorized using %wu byte vectors\n", bytes);
  else
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
		     "loop vectorized using variable length vectors\n");

  loop_p new_loop = vect_transform_loop (loop_vinfo);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
	simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
	= simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
				 new_loop, loop_vinfo, NULL, NULL);

  return ret;
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
	 && optimize_loop_nest_for_speed_p (loop))
	|| loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
			       loop, NULL,
			       vect_loop_vectorized_call (loop),
			       vect_loop_dist_alias_call (loop));
}

/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  /* ----------- Analyze loops. -----------  */

  /* If a loop was duplicated, it gets a bigger number
     than all previously defined loops.  This fact allows us to run
     over only the initial loops, skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
	any_ifcvt_loops = true;
	/* If-conversion sometimes versions both the outer loop
	   (for the case when outer loop vectorization might be
	   desirable) as well as the inner loop in the scalar version
	   of the loop.  So we have:
	    if (LOOP_VECTORIZED (1, 3))
	      {
		loop1
		  loop2
	      }
	    else
	      loop3 (copy of loop1)
		if (LOOP_VECTORIZED (4, 5))
		  loop4 (copy of loop2)
		else
		  loop5 (copy of loop4)
	   If FOR_EACH_LOOP gives us loop3 first (which has
	   dont_vectorize set), make sure to process loop1 before loop4,
	   so that we can prevent vectorization of loop4 if loop1
	   is successfully vectorized.  */
	if (loop->inner)
	  {
	    gimple *loop_vectorized_call
	      = vect_loop_vectorized_call (loop);
	    if (loop_vectorized_call
		&& vect_loop_vectorized_call (loop->inner))
	      {
		tree arg = gimple_call_arg (loop_vectorized_call, 0);
		struct loop *vector_loop
		  = get_loop (cfun, tree_to_shwi (arg));
		if (vector_loop && vector_loop != loop)
		  {
		    /* Make sure we don't vectorize it twice.  */
		    vector_loop->dont_vectorize = true;
		    ret |= try_vectorize_loop (simduid_to_vf_htab,
					       &num_vectorized_loops,
					       vector_loop);
		  }
	      }
	  }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
				 loop);

  vect_location = dump_user_location_t ();

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vectorized %u loops in function.\n",
		     num_vectorized_loops);

  /* ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < number_of_loops (cfun); i++)
      {
	loop = get_loop (cfun, i);
	if (loop && loop->dont_vectorize)
	  {
	    gimple *g = vect_loop_vectorized_call (loop);
	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
		g = NULL;
	      }
	    else
	      g = vect_loop_dist_alias_call (loop);

	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
	      }
	  }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
	continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
	  && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
	optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
	 ??? Also while we try hard to update loop-closed SSA form we fail
	 to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}

/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}

/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, -1);
	  gimple_set_visited (stmt, false);
	}
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
	dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}

/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;

/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
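
/* As an illustrative, target-dependent example: on a target whose
   preferred vector type for "int" is a 16-byte V4SI, an "int a[256]"
   array yields TYPE_ALIGN of V4SI (128 bits), whereas an array smaller
   than one vector yields 0.  */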
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}

/* Return the alignment of the field having the maximum alignment of the
   vector type corresponding to its scalar type.  For now, we only consider
   fields whose offset is a multiple of their vector alignment.
   0 if no suitable field is found.  */
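
/* A made-up example: given

     struct S { int pad; int a[64]; };

   and a vector alignment of, say, 128 bits for "a", the bit offset of
   "a" (32) is not a multiple of 128, so "a" does not contribute and
   the function below returns 0 for struct S.  */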
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
	  || DECL_USER_ALIGN (field)
	  || DECL_ARTIFICIAL (field))
	continue;

      /* We don't need to process the type further if offset is variable,
	 since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
	  || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
	break;

      /* Similarly stop processing the type if offset_tree
	 does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
	break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
	 whose offset is multiple of the vector alignment.  */
      if (alignment
	  && (offset % alignment == 0)
	  && (alignment > max_align))
	max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}

/* Return alignment of vector type corresponding to decl's scalar type
   or 0 if it doesn't exist or the vector alignment is less than
   decl's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  static unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}

/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = dump_user_location_t ();
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
	   && !symtab_node::get (decl)->can_increase_alignment_p ())
	  || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
	continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
	{
	  vnode->increase_alignment (alignment);
	  dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
	}
    }

  delete type_align_map;
  return 0;
}

namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_section_anchors && flag_tree_loop_vectorize;
  }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}