/* Vectorizer
   Copyright (C) 2003-2019 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      / |
                |  \      \/      /  |
                |   \     /\     /   |
                |    \   /  \   /    |
         tree-vect-stmts.c  tree-vect-data-refs.c
                       \      /
                    tree-vect-patterns.c
*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple-pretty-print.h"
#include "opt-problem.h"
#include "internal-fn.h"

/* Loop or bb location, with hotness information.  */
dump_user_location_t vect_location;

/* auto_purge_vect_location's dtor: reset the vect_location
   global, to avoid stale location_t values that could reference
   GC-ed blocks.  */

auto_purge_vect_location::~auto_purge_vect_location ()
{
  vect_location = dump_user_location_t ();
}

/* Dump a cost entry according to args to F.  */
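/* For instance, a single unaligned vector load counted in the loop body
   might print as (a hypothetical entry; the pointer value, statement and
   numbers are illustrative only):

     0x38a1c20 _4 = *p_2(D) 1 times unaligned_load (misalign 4) costs 2 in body  */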

void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
		stmt_vec_info stmt_info, int misalign, unsigned cost,
		enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    {
      print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
      fprintf (f, " ");
    }
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case scalar_stmt:
      ks = "scalar_stmt";
      break;
    case scalar_load:
      ks = "scalar_load";
      break;
    case scalar_store:
      ks = "scalar_store";
      break;
    case vector_stmt:
      ks = "vector_stmt";
      break;
    case vector_load:
      ks = "vector_load";
      break;
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_store:
      ks = "vector_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_perm:
      ks = "vec_perm";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  fprintf (f, "costs %u ", cost);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}

/* For mapping simduid to vectorization factor.  */

class simduid_to_vf : public free_ptr_hash<simduid_to_vf>
{
public:
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}

/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
			   const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
			      const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}

/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE,
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */
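/* For example (a hypothetical GIMPLE sketch; the names and the recorded
   vectorization factor of 4 are illustrative), the folding replaces

     _5 = GOMP_SIMD_VF (simduid.0);    ->  uses of _5 become 4
     _7 = GOMP_SIMD_LANE (simduid.0);  ->  uses of _7 become 0

   When HTAB records no factor for the simduid, the default of 1 is
   used.  */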

static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
	{
	  poly_uint64 vf = 1;
	  enum internal_fn ifn;
	  gimple *stmt = gsi_stmt (i);
	  tree t;
	  if (!is_gimple_call (stmt)
	      || !gimple_call_internal_p (stmt))
	    {
	      gsi_next (&i);
	      continue;
	    }
	  ifn = gimple_call_internal_fn (stmt);
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_LANE:
	    case IFN_GOMP_SIMD_VF:
	    case IFN_GOMP_SIMD_LAST_LANE:
	      break;
	    case IFN_GOMP_SIMD_ORDERED_START:
	    case IFN_GOMP_SIMD_ORDERED_END:
	      if (integer_onep (gimple_call_arg (stmt, 0)))
		{
		  enum built_in_function bcode
		    = (ifn == IFN_GOMP_SIMD_ORDERED_START
		       ? BUILT_IN_GOMP_ORDERED_START
		       : BUILT_IN_GOMP_ORDERED_END);
		  gimple *g
		    = gimple_build_call (builtin_decl_explicit (bcode), 0);
		  gimple_move_vops (g, stmt);
		  gsi_replace (&i, g, true);
		  continue;
		}
	      gsi_remove (&i, true);
	      unlink_stmt_vdef (stmt);
	      continue;
	    default:
	      gsi_next (&i);
	      continue;
	    }
	  tree arg = gimple_call_arg (stmt, 0);
	  gcc_assert (arg != NULL_TREE);
	  gcc_assert (TREE_CODE (arg) == SSA_NAME);
	  simduid_to_vf *p = NULL, data;
	  data.simduid = DECL_UID (SSA_NAME_VAR (arg));
	  /* Need to nullify loop safelen field since its value is not
	     valid after transformation.  */
	  if (bb->loop_father && bb->loop_father->safelen > 0)
	    bb->loop_father->safelen = 0;
	  if (htab)
	    {
	      p = htab->find (&data);
	      if (p)
		vf = p->vf;
	    }
	  switch (ifn)
	    {
	    case IFN_GOMP_SIMD_VF:
	      t = build_int_cst (unsigned_type_node, vf);
	      break;
	    case IFN_GOMP_SIMD_LANE:
	      t = build_int_cst (unsigned_type_node, 0);
	      break;
	    case IFN_GOMP_SIMD_LAST_LANE:
	      t = gimple_call_arg (stmt, 1);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree lhs = gimple_call_lhs (stmt);
	  if (lhs)
	    replace_uses_by (lhs, t);
	  release_defs (stmt);
	  gsi_remove (&i, true);
	}
    }
}

/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
	   && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
	   && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
	*ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
	{
	  simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
	  *p = data;
	  *slot = p;
	}
      else if ((*slot)->simduid != ns->simduid)
	(*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}

/* Find "omp simd array" temporaries and map them to corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *stmt = gsi_stmt (gsi);
	if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
	  continue;
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_GOMP_SIMD_LANE:
	  case IFN_GOMP_SIMD_VF:
	  case IFN_GOMP_SIMD_LAST_LANE:
	    break;
	  default:
	    continue;
	  }
	tree lhs = gimple_call_lhs (stmt);
	if (lhs == NULL_TREE)
	  continue;
	imm_use_iterator use_iter;
	gimple *use_stmt;
	ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
	FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
	  if (!is_gimple_debug (use_stmt))
	    walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}

/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */
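/* E.g. a simd array created for a worst-case safelen of 64 whose loop was
   vectorized with a factor of 8, say

     unsigned char D.1990[64];

   gets re-laid out as D.1990[8].  (A hypothetical sketch; the decl name
   and sizes are illustrative only.)  */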

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
	 = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
	tree decl = (*iter)->decl;
	poly_uint64 vf = 1;
	if (simduid_to_vf_htab)
	  {
	    simduid_to_vf *p = NULL, data;
	    data.simduid = (*iter)->simduid;
	    p = simduid_to_vf_htab->find (&data);
	    if (p)
	      vf = p->vf;
	  }
	tree atype
	  = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
	TREE_TYPE (decl) = atype;
	relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}

/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
		    vec_info_shared *shared_)
  : kind (kind_in),
    shared (shared_),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
}

vec_info::~vec_info ()
{
  slp_instance instance;
  unsigned int i;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance, true);

  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos ();
}

vec_info_shared::vec_info_shared ()
  : datarefs (vNULL),
    datarefs_copy (vNULL),
    ddrs (vNULL)
{
}

vec_info_shared::~vec_info_shared ()
{
  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  datarefs_copy.release ();
}

void
vec_info_shared::save_datarefs ()
{
  if (!flag_checking)
    return;
  datarefs_copy.reserve_exact (datarefs.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    datarefs_copy.quick_push (*datarefs[i]);
}

void
vec_info_shared::check_datarefs ()
{
  if (!flag_checking)
    return;
  gcc_assert (datarefs.length () == datarefs_copy.length ());
  for (unsigned i = 0; i < datarefs.length (); ++i)
    if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
      gcc_unreachable ();
}

/* Record that STMT belongs to the vectorizable region.  Create and return
   an associated stmt_vec_info.  */

stmt_vec_info
vec_info::add_stmt (gimple *stmt)
{
  stmt_vec_info res = new_stmt_vec_info (stmt);
  set_vinfo_for_stmt (stmt, res);
  return res;
}

/* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
   return null.  It is safe to call this function on any statement, even if
   it might not be part of the vectorizable region.  */

stmt_vec_info
vec_info::lookup_stmt (gimple *stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
    {
      stmt_vec_info res = stmt_vec_infos[uid - 1];
      if (res && res->stmt == stmt)
	return res;
    }
  return NULL;
}

/* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
   return that stmt_vec_info, otherwise return null.  It is safe to call
   this on arbitrary operands.  */

stmt_vec_info
vec_info::lookup_def (tree name)
{
  if (TREE_CODE (name) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (name))
    return lookup_stmt (SSA_NAME_DEF_STMT (name));
  return NULL;
}

/* See whether there is a single non-debug statement that uses LHS and
   whether that statement has an associated stmt_vec_info.  Return the
   stmt_vec_info if so, otherwise return null.  */

stmt_vec_info
vec_info::lookup_single_use (tree lhs)
{
  use_operand_p dummy;
  gimple *use_stmt;
  if (single_imm_use (lhs, &dummy, &use_stmt))
    return lookup_stmt (use_stmt);
  return NULL;
}

/* Return vectorization information about DR.  */

dr_vec_info *
vec_info::lookup_dr (data_reference *dr)
{
  stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
  /* DR_STMT should never refer to a stmt in a pattern replacement.  */
  gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
  return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
}

/* Record that NEW_STMT_INFO now implements the same data reference
   as OLD_STMT_INFO.  */

void
vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
{
  gcc_assert (!is_pattern_stmt_p (old_stmt_info));
  STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
  new_stmt_info->dr_aux = old_stmt_info->dr_aux;
  STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
    = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
  STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
    = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
}

/* Permanently remove the statement described by STMT_INFO from the
   function.  */

void
vec_info::remove_stmt (stmt_vec_info stmt_info)
{
  gcc_assert (!stmt_info->pattern_stmt_p);
  set_vinfo_for_stmt (stmt_info->stmt, NULL);
  gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
  unlink_stmt_vdef (stmt_info->stmt);
  gsi_remove (&si, true);
  release_defs (stmt_info->stmt);
  free_stmt_vec_info (stmt_info);
}

/* Replace the statement at GSI by NEW_STMT, both the vectorization
   information and the function itself.  STMT_INFO describes the statement
   at GSI.  */

void
vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
			gimple *new_stmt)
{
  gimple *old_stmt = stmt_info->stmt;
  gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
  set_vinfo_for_stmt (old_stmt, NULL);
  set_vinfo_for_stmt (new_stmt, stmt_info);
  stmt_info->stmt = new_stmt;
  gsi_replace (gsi, new_stmt, true);
}

/* Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info res = XCNEW (class _stmt_vec_info);
  res->vinfo = this;
  res->stmt = stmt;

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_REDUC_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_REDUC_CODE (res) = ERROR_MARK;
  STMT_VINFO_REDUC_FN (res) = IFN_LAST;
  STMT_VINFO_REDUC_IDX (res) = -1;
  STMT_VINFO_SLP_VECT_ONLY (res) = false;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;

  /* This is really "uninitialized" until vect_compute_data_ref_alignment.  */
  res->dr_aux.misalignment = DR_MISALIGNMENT_UNINITIALIZED;

  return res;
}

/* Associate STMT with INFO.  */

void
vec_info::set_vinfo_for_stmt (gimple *stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_checking_assert (info);
      uid = stmt_vec_infos.length () + 1;
      gimple_set_uid (stmt, uid);
      stmt_vec_infos.safe_push (info);
    }
  else
    {
      gcc_checking_assert (info == NULL);
      stmt_vec_infos[uid - 1] = info;
    }
}

/* Free the contents of stmt_vec_infos.  */

void
vec_info::free_stmt_vec_infos (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_infos, i, info)
    if (info != NULL)
      free_stmt_vec_info (info);
  stmt_vec_infos.release ();
}

/* Free STMT_INFO.  */

void
vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
{
  if (stmt_info->pattern_stmt_p)
    {
      gimple_set_bb (stmt_info->stmt, NULL);
      tree lhs = gimple_get_lhs (stmt_info->stmt);
      if (lhs && TREE_CODE (lhs) == SSA_NAME)
	release_ssa_name (lhs);
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  free (stmt_info);
}

/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */

void
vect_free_loop_info_assumptions (class loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since it is
     used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}

/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */
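/* After versioning the CFG has the shape (a sketch; the loop numbers are
   illustrative only):

     if (.LOOP_VECTORIZED (2, 3))
       <loop 2, if-converted body>
     else
       <loop 3, scalar body>

   so the call is located by walking up from LOOP's preheader to the
   GIMPLE_COND and inspecting the statement just before it.  */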

gimple *
vect_loop_vectorized_call (class loop *loop, gcond **cond)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
	break;
      if (!single_pred_p (bb))
	break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      if (cond)
	*cond = as_a <gcond *> (g);
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
	{
	  g = gsi_stmt (gsi);
	  if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
	      && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
		  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
	    return g;
	}
    }
  return NULL;
}

/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */

static gimple *
vect_loop_dist_alias_call (class loop *loop)
{
  basic_block bb;
  basic_block entry;
  class loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
	continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
	continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
	 alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
	  && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
	return g;
    }

  return NULL;
}

/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if converted.  */
static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  class loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
		       == loop_vectorized_call);
  /* If we are going to vectorize outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so the work is wasted, or it is used only for
     a few iterations.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
	{
	  arg = gimple_call_arg (g, 0);
	  get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
	  fold_loop_internal_call (g, boolean_false_node);
	}
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *phi = gsi_stmt (gsi);
	  gimple_set_uid (phi, 0);
	}
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, 0);
	}
    }
  free (bbs);
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		      unsigned *num_vectorized_loops,
		      loop_p loop, loop_vec_info orig_loop_vinfo,
		      gimple *loop_vectorized_call,
		      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vec_info_shared shared;
  auto_purge_vect_location sentinel;
  vect_location = find_loop_location (loop);
  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE | MSG_PRIORITY_INTERNALS,
		 "\nAnalyzing loop at %s:%d\n",
		 LOCATION_FILE (vect_location.get_location_t ()),
		 LOCATION_LINE (vect_location.get_location_t ()));

  /* Try to analyze the loop, retaining an opt_problem if dump_enabled_p.  */
  opt_loop_vec_info loop_vinfo
    = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
  loop->aux = loop_vinfo;

  if (!loop_vinfo)
    if (dump_enabled_p ())
      if (opt_problem *problem = loop_vinfo.get_problem ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "couldn't vectorize loop\n");
	  problem->emit_and_clear ();
	}

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
	 BB of innermost loops.
	 ??? Ideally BB vectorization would learn to vectorize
	 control flow by applying if-conversion on-the-fly, the
	 following retains the if-converted loop body even when
	 only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
	  && loop_vectorized_call
	  && ! loop->inner)
	{
	  basic_block bb = loop->header;
	  bool require_loop_vectorize = false;
	  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
	       !gsi_end_p (gsi); gsi_next (&gsi))
	    {
	      gimple *stmt = gsi_stmt (gsi);
	      gcall *call = dyn_cast <gcall *> (stmt);
	      if (call && gimple_call_internal_p (call))
		{
		  internal_fn ifn = gimple_call_internal_fn (call);
		  if (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE
		      /* Don't keep the if-converted parts when the ifn with
			 specific type is not supported by the backend.  */
		      || (direct_internal_fn_p (ifn)
			  && !direct_internal_fn_supported_p
						(call, OPTIMIZE_FOR_SPEED)))
		    {
		      require_loop_vectorize = true;
		      break;
		    }
		}
	      gimple_set_uid (stmt, -1);
	      gimple_set_visited (stmt, false);
	    }
	  if (!require_loop_vectorize && vect_slp_bb (bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "basic block vectorized\n");
	      fold_loop_internal_call (loop_vectorized_call,
				       boolean_true_node);
	      loop_vectorized_call = NULL;
	      ret |= TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	    }
	}
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
	 loop, don't vectorize its inner loop; we'll attempt to
	 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
	 loop version.  */
      if (loop_vectorized_call && loop->inner)
	loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
	 assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
	vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);

  unsigned HOST_WIDE_INT bytes;
  if (dump_enabled_p ())
    {
      if (loop_vinfo->vector_size.is_constant (&bytes))
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
			 "loop vectorized using %wu byte vectors\n", bytes);
      else
	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
			 "loop vectorized using variable length vectors\n");
    }

  loop_p new_loop = vect_transform_loop (loop_vinfo);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
	simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
	= simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
				 new_loop, loop_vinfo, NULL, NULL);

  return ret;
}

/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
		    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
	 && optimize_loop_nest_for_speed_p (loop))
	|| loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
			       loop, NULL,
			       vect_loop_vectorized_call (loop),
			       vect_loop_dist_alias_call (loop));
}

/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  class loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  /* ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
	any_ifcvt_loops = true;
	/* If-conversion sometimes versions both the outer loop
	   (for the case when outer loop vectorization might be
	   desirable) as well as the inner loop in the scalar version
	   of the loop.  So we have:
	    if (LOOP_VECTORIZED (1, 3))
	      {
		loop1
		  loop2
	      }
	    else
	      loop3 (copy of loop1)
		if (LOOP_VECTORIZED (4, 5))
		  loop4 (copy of loop2)
		else
		  loop5 (copy of loop4)
	   If FOR_EACH_LOOP gives us loop3 first (which has
	   dont_vectorize set), make sure to process loop1 before loop4;
	   so that we can prevent vectorization of loop4 if loop1
	   is successfully vectorized.  */
	if (loop->inner)
	  {
	    gimple *loop_vectorized_call
	      = vect_loop_vectorized_call (loop);
	    if (loop_vectorized_call
		&& vect_loop_vectorized_call (loop->inner))
	      {
		tree arg = gimple_call_arg (loop_vectorized_call, 0);
		class loop *vector_loop
		  = get_loop (cfun, tree_to_shwi (arg));
		if (vector_loop && vector_loop != loop)
		  {
		    /* Make sure we don't vectorize it twice.  */
		    vector_loop->dont_vectorize = true;
		    ret |= try_vectorize_loop (simduid_to_vf_htab,
					       &num_vectorized_loops,
					       vector_loop);
		  }
	      }
	  }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
				 loop);

  vect_location = dump_user_location_t ();

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vectorized %u loops in function.\n",
		     num_vectorized_loops);

  /* ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < number_of_loops (cfun); i++)
      {
	loop = get_loop (cfun, i);
	if (loop && loop->dont_vectorize)
	  {
	    gimple *g = vect_loop_vectorized_call (loop);
	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
		g = NULL;
	      }
	    else
	      g = vect_loop_dist_alias_call (loop);

	    if (g)
	      {
		fold_loop_internal_call (g, boolean_false_node);
		ret |= TODO_cleanup_cfg;
	      }
	  }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
	continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
	  && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
	optimize_mask_stores (loop);
      loop->aux = NULL;
    }

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
	 ??? Also while we try hard to update loop-closed SSA form we fail
	 to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}

/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

};  // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp array simd" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}

/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

};  // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  auto_purge_vect_location sentinel;
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_uid (stmt, -1);
	  gimple_set_visited (stmt, false);
	}
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}

/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;

/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
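/* For example (hypothetical and target-dependent): for 'int a[64]' on a
   target whose preferred int vector type is 128 bits wide, this returns
   the vector type's alignment (typically 128 bits); for 'int a[2]' it
   returns 0, since the whole array is smaller than one vector.  */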
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree scalar_type = strip_array_types (type);
  tree vectype = get_vectype_for_scalar_type_and_size (scalar_type, 0);
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}

/* Return alignment of field having maximum alignment of vector type
   corresponding to its scalar type.  For now, we only consider fields whose
   offset is a multiple of its vector alignment.
   0 if no suitable field is found.  */
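/* For example (hypothetical): in 'struct { int pad; int a[16]; }' the
   field 'a' sits at bit offset 32; with a 128-bit vector alignment the
   test 'offset % alignment == 0' fails, so 'a' would not contribute to
   the returned maximum.  */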
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
	  || DECL_USER_ALIGN (field)
	  || DECL_ARTIFICIAL (field))
	continue;

      /* We don't need to process the type further if offset is variable,
	 since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
	  || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
	break;

      /* Similarly stop processing the type if offset_tree
	 does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
	break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
	 whose offset is multiple of the vector alignment.  */
      if (alignment
	  && (offset % alignment == 0)
	  && (alignment > max_align))
	max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}

/* Return alignment of vector type corresponding to decl's scalar type
   or 0 if it doesn't exist or the vector alignment is less than
   decl's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  static unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}

/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = dump_user_location_t ();
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
	   && !symtab_node::get (decl)->can_increase_alignment_p ())
	  || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
	continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
	{
	  vnode->increase_alignment (alignment);
	  if (dump_enabled_p ())
	    dump_printf (MSG_NOTE, "Increasing alignment of decl: %T\n", decl);
	}
    }

  delete type_align_map;
  return 0;
}

namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_section_anchors && flag_tree_loop_vectorize;
  }

  virtual unsigned int execute (function *) { return increase_alignment (); }

};  // class pass_ipa_increase_alignment

}  // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}

/* If the condition represented by T is a comparison or the SSA name
   result of a comparison, extract the comparison's operands.  Represent
   T as NE_EXPR <T, 0> otherwise.  */
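/* E.g. (hypothetical SSA names): for T = _1 where '_1 = a_2 < b_3' this
   records {LT_EXPR, a_2, b_3}; for a plain boolean T it records
   {NE_EXPR, T, 0}.  */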

void
scalar_cond_masked_key::get_cond_ops_from_tree (tree t)
{
  if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_comparison)
    {
      this->code = TREE_CODE (t);
      this->op0 = TREE_OPERAND (t, 0);
      this->op1 = TREE_OPERAND (t, 1);
      return;
    }

  if (TREE_CODE (t) == SSA_NAME)
    if (gassign *stmt = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (t)))
      {
	tree_code code = gimple_assign_rhs_code (stmt);
	if (TREE_CODE_CLASS (code) == tcc_comparison)
	  {
	    this->code = code;
	    this->op0 = gimple_assign_rhs1 (stmt);
	    this->op1 = gimple_assign_rhs2 (stmt);
	    return;
	  }
      }

  this->code = NE_EXPR;
  this->op0 = t;
  this->op1 = build_zero_cst (TREE_TYPE (t));
}