/* Vectorizer
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      / |
                |  \      \/      /  |
                |   \     /\     /   |
                |    \   /  \   /    |
         tree-vect-stmts.c  tree-vect-data-refs.c
                       \      /
                    tree-vect-patterns.c
*/

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple-pretty-print.h"
#include "opt-problem.h"
#include "internal-fn.h"

/* Loop or bb location, with hotness information.  */
dump_user_location_t vect_location;

/* Dump a cost entry according to args to F.  */
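
/* A dumped entry reads, e.g. (illustrative pointer and cost values):

     0x3a2b4c0 _5 = *_4 1 times scalar_load costs 12 in body

   i.e. the cost-data pointer, the statement, the count, the cost kind,
   the cost and the cost-model location, in that order.  */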

void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
                stmt_vec_info stmt_info, int misalign, unsigned cost,
                enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    {
      print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
      fprintf (f, " ");
    }
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case scalar_stmt:
      ks = "scalar_stmt";
      break;
    case scalar_load:
      ks = "scalar_load";
      break;
    case scalar_store:
      ks = "scalar_store";
      break;
    case vector_stmt:
      ks = "vector_stmt";
      break;
    case vector_load:
      ks = "vector_load";
      break;
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_store:
      ks = "vector_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_perm:
      ks = "vec_perm";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  fprintf (f, "costs %u ", cost);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}

/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}

/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
                           const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
                              const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}

/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE,
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */
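
/* For example, with a vectorization factor of 4 recorded in HTAB the
   folding below amounts to (illustrative GIMPLE, hypothetical SSA names):

     _7 = GOMP_SIMD_VF (simduid.0);             uses of _7 become 4
     _8 = GOMP_SIMD_LANE (simduid.0);           uses of _8 become 0
     _9 = GOMP_SIMD_LAST_LANE (simduid.0, _3);  uses of _9 become _3

   If HTAB is null or has no entry for the simduid, the factor defaults
   to 1.  */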

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
        {
          poly_uint64 vf = 1;
          enum internal_fn ifn;
          gimple *stmt = gsi_stmt (i);
          tree t;
          if (!is_gimple_call (stmt)
              || !gimple_call_internal_p (stmt))
            {
              gsi_next (&i);
              continue;
            }
          ifn = gimple_call_internal_fn (stmt);
          switch (ifn)
            {
            case IFN_GOMP_SIMD_LANE:
            case IFN_GOMP_SIMD_VF:
            case IFN_GOMP_SIMD_LAST_LANE:
              break;
            case IFN_GOMP_SIMD_ORDERED_START:
            case IFN_GOMP_SIMD_ORDERED_END:
              if (integer_onep (gimple_call_arg (stmt, 0)))
                {
                  enum built_in_function bcode
                    = (ifn == IFN_GOMP_SIMD_ORDERED_START
                       ? BUILT_IN_GOMP_ORDERED_START
                       : BUILT_IN_GOMP_ORDERED_END);
                  gimple *g
                    = gimple_build_call (builtin_decl_explicit (bcode), 0);
                  tree vdef = gimple_vdef (stmt);
                  gimple_set_vdef (g, vdef);
                  SSA_NAME_DEF_STMT (vdef) = g;
                  gimple_set_vuse (g, gimple_vuse (stmt));
                  gsi_replace (&i, g, true);
                  continue;
                }
              gsi_remove (&i, true);
              unlink_stmt_vdef (stmt);
              continue;
            default:
              gsi_next (&i);
              continue;
            }
          tree arg = gimple_call_arg (stmt, 0);
          gcc_assert (arg != NULL_TREE);
          gcc_assert (TREE_CODE (arg) == SSA_NAME);
          simduid_to_vf *p = NULL, data;
          data.simduid = DECL_UID (SSA_NAME_VAR (arg));
          /* Need to nullify loop safelen field since its value is not
             valid after transformation.  */
          if (bb->loop_father && bb->loop_father->safelen > 0)
            bb->loop_father->safelen = 0;
          if (htab)
            {
              p = htab->find (&data);
              if (p)
                vf = p->vf;
            }
          switch (ifn)
            {
            case IFN_GOMP_SIMD_VF:
              t = build_int_cst (unsigned_type_node, vf);
              break;
            case IFN_GOMP_SIMD_LANE:
              t = build_int_cst (unsigned_type_node, 0);
              break;
            case IFN_GOMP_SIMD_LAST_LANE:
              t = gimple_call_arg (stmt, 1);
              break;
            default:
              gcc_unreachable ();
            }
          tree lhs = gimple_call_lhs (stmt);
          if (lhs)
            replace_uses_by (lhs, t);
          release_defs (stmt);
          gsi_remove (&i, true);
        }
    }
}

/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
           && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
           && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
        *ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
        {
          simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
          *p = data;
          *slot = p;
        }
      else if ((*slot)->simduid != ns->simduid)
        (*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}

/* Find "omp simd array" temporaries and map them to corresponding
   simduid.  */
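
/* For example, given (hypothetical names)

     _7 = GOMP_SIMD_LANE (simduid.0);
     D.1737[_7] = stuff;

   the store's operands are walked and D.1737 is recorded against
   DECL_UID (simduid.0).  An array reached from two different simduids
   has its entry poisoned to -1U, which shrink_simd_arrays skips.  */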

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple *stmt = gsi_stmt (gsi);
        if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
          continue;
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
          case IFN_GOMP_SIMD_VF:
          case IFN_GOMP_SIMD_LAST_LANE:
            break;
          default:
            continue;
          }
        tree lhs = gimple_call_lhs (stmt);
        if (lhs == NULL_TREE)
          continue;
        imm_use_iterator use_iter;
        gimple *use_stmt;
        ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
        FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
          if (!is_gimple_debug (use_stmt))
            walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}

/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */
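
/* E.g. an "omp simd array" temporary D.1737[16] (hypothetical) created
   for safelen 16 is re-laid out as D.1737[4] when its loop was
   vectorized with a factor of 4, and as D.1737[1] when the loop was not
   vectorized at all, since VF below defaults to 1.  */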

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
         = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
        tree decl = (*iter)->decl;
        poly_uint64 vf = 1;
        if (simduid_to_vf_htab)
          {
            simduid_to_vf *p = NULL, data;
            data.simduid = (*iter)->simduid;
            p = simduid_to_vf_htab->find (&data);
            if (p)
              vf = p->vf;
          }
        tree atype
          = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
        TREE_TYPE (decl) = atype;
        relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}

/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
                    vec_info_shared *shared_)
  : kind (kind_in),
    shared (shared_),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
}

vec_info::~vec_info ()
{
  slp_instance instance;
  unsigned int i;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance, true);

  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos ();
}

vec_info_shared::vec_info_shared ()
  : datarefs (vNULL),
    datarefs_copy (vNULL),
    ddrs (vNULL)
{
}

vec_info_shared::~vec_info_shared ()
{
  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  datarefs_copy.release ();
}

void
vec_info_shared::save_datarefs ()
{
  if (!flag_checking)
    return;
  datarefs_copy.reserve_exact (datarefs.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    datarefs_copy.quick_push (*datarefs[i]);
}

void
vec_info_shared::check_datarefs ()
{
  if (!flag_checking)
    return;
  gcc_assert (datarefs.length () == datarefs_copy.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
      gcc_unreachable ();
}
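
/* save_datarefs and check_datarefs form a checking-only pair: the former
   snapshots the datarefs once analyzed, the latter asserts that a later
   user of the shared data (e.g. the analysis of another loop variant
   reusing this vec_info_shared) has not modified them in the
   meantime.  */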

/* Record that STMT belongs to the vectorizable region.  Create and return
   an associated stmt_vec_info.  */

stmt_vec_info
vec_info::add_stmt (gimple *stmt)
{
  stmt_vec_info res = new_stmt_vec_info (stmt);
  set_vinfo_for_stmt (stmt, res);
  return res;
}

/* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
   return null.  It is safe to call this function on any statement, even if
   it might not be part of the vectorizable region.  */

stmt_vec_info
vec_info::lookup_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
    {
      stmt_vec_info res = stmt_vec_infos[uid - 1];
      if (res && res->stmt == stmt)
        return res;
    }
  return NULL;
}
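
/* The uid scheme used above and maintained by set_vinfo_for_stmt below:
   a gimple uid of 0 means "no stmt_vec_info", while a uid N > 0 is an
   index into stmt_vec_infos at N - 1.  The res->stmt == stmt check
   guards against stale uids left on statements that are no longer part
   of the region.  */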

/* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
   return that stmt_vec_info, otherwise return null.  It is safe to call
   this on arbitrary operands.  */

stmt_vec_info
vec_info::lookup_def (tree name)
{
  if (TREE_CODE (name) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (name))
    return lookup_stmt (SSA_NAME_DEF_STMT (name));
  return NULL;
}

/* See whether there is a single non-debug statement that uses LHS and
   whether that statement has an associated stmt_vec_info.  Return the
   stmt_vec_info if so, otherwise return null.  */

stmt_vec_info
vec_info::lookup_single_use (tree lhs)
{
  use_operand_p dummy;
  gimple *use_stmt;
  if (single_imm_use (lhs, &dummy, &use_stmt))
    return lookup_stmt (use_stmt);
  return NULL;
}

/* Return vectorization information about DR.  */

dr_vec_info *
vec_info::lookup_dr (data_reference *dr)
{
  stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
  /* DR_STMT should never refer to a stmt in a pattern replacement.  */
  gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
  return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
}

/* Record that NEW_STMT_INFO now implements the same data reference
   as OLD_STMT_INFO.  */

void
vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
{
  gcc_assert (!is_pattern_stmt_p (old_stmt_info));
  STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
  new_stmt_info->dr_aux = old_stmt_info->dr_aux;
  STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
    = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
  STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
    = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
}

/* Permanently remove the statement described by STMT_INFO from the
   function.  */

void
vec_info::remove_stmt (stmt_vec_info stmt_info)
{
  gcc_assert (!stmt_info->pattern_stmt_p);
  set_vinfo_for_stmt (stmt_info->stmt, NULL);
  gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
  unlink_stmt_vdef (stmt_info->stmt);
  gsi_remove (&si, true);
  release_defs (stmt_info->stmt);
  free_stmt_vec_info (stmt_info);
}

/* Replace the statement at GSI by NEW_STMT, both the vectorization
   information and the function itself.  STMT_INFO describes the statement
   at GSI.  */

void
vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
                        gimple *new_stmt)
{
  gimple *old_stmt = stmt_info->stmt;
  gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
  set_vinfo_for_stmt (old_stmt, NULL);
  set_vinfo_for_stmt (new_stmt, stmt_info);
  stmt_info->stmt = new_stmt;
  gsi_replace (gsi, new_stmt, true);
}

/* Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info res = XCNEW (struct _stmt_vec_info);
  res->vinfo = this;
  res->stmt = stmt;

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;

  /* This is really "uninitialized" until vect_compute_data_ref_alignment.  */
  res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;

  return res;
}

/* Associate STMT with INFO.  */

void
vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_checking_assert (info);
      uid = stmt_vec_infos.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_infos.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_infos[uid - 1] = info;
    }
}

/* Free the contents of stmt_vec_infos.  */

void
vec_info::free_stmt_vec_infos (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
    if (info != NULL)
      free_stmt_vec_info (info);
  stmt_vec_infos.release ();
}

/* Free STMT_INFO.  */

void
vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
{
  if (stmt_info->pattern_stmt_p)
    {
      gimple_set_bb (stmt_info->stmt, NULL);
      tree lhs = gimple_get_lhs (stmt_info->stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
        release_ssa_name (lhs);
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  free (stmt_info);
}

/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */
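
/* Callers check loop_constraint_set_p (loop, LOOP_C_FINITE) first: the
   estimates freed here were derived while the loop was analyzed under
   versioning assumptions, so they must not survive into later passes
   once the loop ends up not being vectorized.  */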

void
vect_free_loop_info_assumptions (struct loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since it is
     still used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}

/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */
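
/* The CFG left by if-conversion is, roughly:

       if (LOOP_VECTORIZED (new_loop_num, scalar_loop_num))
         <if-converted loop>
       else
         <scalar loop>

   so the call is found by walking single predecessors upwards from
   LOOP's preheader to the GIMPLE_COND and inspecting the statement
   just before it.  */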

static gimple *
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
        break;
      if (!single_pred_p (bb))
        break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
        {
          g = gsi_stmt (gsi);
          if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
              && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
                  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
            return g;
        }
    }
  return NULL;
}

/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */

static gimple *
vect_loop_dist_alias_call (struct loop *loop)
{
  basic_block bb;
  basic_block entry;
  struct loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
        continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
        continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
         alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
          && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
        return g;
    }

  return NULL;
}

/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if-converted.  */
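
/* Zeroing the uids below ensures vec_info::lookup_stmt finds no stale
   stmt_vec_info associations for the scalar copy's statements, should
   that copy be analyzed later in its own right.  */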

static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
                       == loop_vectorized_call);
  /* If we are going to vectorize outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so it is wasted work, or it is used only for
     a few iterations.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
        {
          arg = gimple_call_arg (g, 0);
          get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
          fold_loop_internal_call (g, boolean_false_node);
        }
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *phi = gsi_stmt (gsi);
          gimple_set_uid (phi, 0);
        }
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, 0);
        }
    }
  free (bbs);
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
                      unsigned *num_vectorized_loops,
                      loop_p loop, loop_vec_info orig_loop_vinfo,
                      gimple *loop_vectorized_call,
                      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vec_info_shared shared;
  vect_location = find_loop_location (loop);
  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
                 "\nAnalyzing loop at %s:%d\n",
                 LOCATION_FILE (vect_location.get_location_t ()),
                 LOCATION_LINE (vect_location.get_location_t ()));

  /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p.  */
  opt_loop_vec_info loop_vinfo
    = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
  loop->aux = loop_vinfo;

  if (!loop_vinfo)
    if (dump_enabled_p ())
      if (opt_problem *problem = loop_vinfo.get_problem ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "couldn't vectorize loop\n");
          problem->emit_and_clear ();
        }

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
         assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
        vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
         BB of innermost loops.
         ??? Ideally BB vectorization would learn to vectorize
         control flow by applying if-conversion on-the-fly, the
         following retains the if-converted loop body even when
         only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
          && loop_vectorized_call
          && ! loop->inner)
        {
          basic_block bb = loop->header;
          bool require_loop_vectorize = false;
          for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
               !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              gcall *call = dyn_cast <gcall *> (stmt);
              if (call && gimple_call_internal_p (call))
                {
                  internal_fn ifn = gimple_call_internal_fn (call);
                  if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
                      /* Don't keep the if-converted parts when the ifn with
                         specific type is not supported by the backend.  */
                      || (direct_internal_fn_p (ifn)
                          && !direct_internal_fn_supported_p
                                (call, OPTIMIZE_FOR_SPEED)))
                    {
                      require_loop_vectorize = true;
                      break;
                    }
                }
              gimple_set_uid (stmt, -1);
              gimple_set_visited (stmt, false);
            }
          if (!require_loop_vectorize && vect_slp_bb (bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "basic block vectorized\n");
              fold_loop_internal_call (loop_vectorized_call,
                                       boolean_true_node);
              loop_vectorized_call = NULL;
              ret |= TODO_cleanup_cfg;
            }
        }
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
         loop, don't vectorize its inner loop; we'll attempt to
         vectorize LOOP_VECTORIZED guarded inner loop of the scalar
         loop version.  */
      if (loop_vectorized_call && loop->inner)
        loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
         assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
        vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);

  unsigned HOST_WIDE_INT bytes;
  if (dump_enabled_p ())
    {
      if (current_vector_size.is_constant (&bytes))
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                         "loop vectorized using %wu byte vectors\n", bytes);
      else
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                         "loop vectorized using variable length vectors\n");
    }

  loop_p new_loop = vect_transform_loop (loop_vinfo);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
        simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
        = simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
                                 new_loop, loop_vinfo, NULL, NULL);

  return ret;
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
                    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
         && optimize_loop_nest_for_speed_p (loop))
        || loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
                               loop, NULL,
                               vect_loop_vectorized_call (loop),
                               vect_loop_dist_alias_call (loop));
}

/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  /*  ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
        any_ifcvt_loops = true;
        /* If-conversion sometimes versions both the outer loop
           (for the case when outer loop vectorization might be
           desirable) as well as the inner loop in the scalar version
           of the loop.  So we have:
            if (LOOP_VECTORIZED (1, 3))
              {
                loop1
                loop2
              }
            else
              loop3 (copy of loop1)
              if (LOOP_VECTORIZED (4, 5))
                loop4 (copy of loop2)
              else
                loop5 (copy of loop4)
           If FOR_EACH_LOOP gives us loop3 first (which has
           dont_vectorize set), make sure to process loop1 before loop4;
           so that we can prevent vectorization of loop4 if loop1
           is successfully vectorized.  */
        if (loop->inner)
          {
            gimple *loop_vectorized_call
              = vect_loop_vectorized_call (loop);
            if (loop_vectorized_call
                && vect_loop_vectorized_call (loop->inner))
              {
                tree arg = gimple_call_arg (loop_vectorized_call, 0);
                struct loop *vector_loop
                  = get_loop (cfun, tree_to_shwi (arg));
                if (vector_loop && vector_loop != loop)
                  {
                    /* Make sure we don't vectorize it twice.  */
                    vector_loop->dont_vectorize = true;
                    ret |= try_vectorize_loop (simduid_to_vf_htab,
                                               &num_vectorized_loops,
                                               vector_loop);
                  }
              }
          }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
                                 loop);

  vect_location = dump_user_location_t ();

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorized %u loops in function.\n",
                     num_vectorized_loops);

  /*  ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < number_of_loops (cfun); i++)
      {
        loop = get_loop (cfun, i);
        if (loop && loop->dont_vectorize)
          {
            gimple *g = vect_loop_vectorized_call (loop);
            if (g)
              {
                fold_loop_internal_call (g, boolean_false_node);
                ret |= TODO_cleanup_cfg;
                g = NULL;
              }
            else
              g = vect_loop_dist_alias_call (loop);

            if (g)
              {
                fold_loop_internal_call (g, boolean_false_node);
                ret |= TODO_cleanup_cfg;
              }
          }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
        continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
          && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
        optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
         ??? Also while we try hard to update loop-closed SSA form we fail
         to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}

/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}

/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, -1);
          gimple_set_visited (stmt, false);
        }
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  vect_location = dump_user_location_t ();

  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}

/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;

/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
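
/* E.g. on a target with 128-bit integer vectors, "int a[64]" maps to
   the V4SI vector type and this returns TYPE_ALIGN (V4SI), i.e. 128,
   while "int a[2]" is smaller than one vector and yields 0.  */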

static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}

/* Return alignment of field having maximum alignment of vector type
   corresponding to its scalar type.  For now, we only consider fields whose
   offset is a multiple of its vector alignment.
   0 if no suitable field is found.  */
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
          || DECL_USER_ALIGN (field)
          || DECL_ARTIFICIAL (field))
        continue;

      /* We don't need to process the type further if offset is variable,
         since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
          || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
        break;

      /* Similarly stop processing the type if offset_tree
         does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
        break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
         whose offset is multiple of the vector alignment.  */
      if (alignment
          && (offset % alignment == 0)
          && (alignment > max_align))
        max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}

/* Return alignment of vector type corresponding to decl's scalar type
   or 0 if it doesn't exist or the vector alignment is less than
   decl's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}

/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = dump_user_location_t ();
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
           && !symtab_node::get (decl)->can_increase_alignment_p ())
          || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
        continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
        {
          vnode->increase_alignment (alignment);
          if (dump_enabled_p ())
            dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
        }
    }

  delete type_align_map;
  return 0;
}

namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_section_anchors && flag_tree_loop_vectorize;
  }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}