[42/46] Add vec_info::replace_stmt
[official-gcc.git] / gcc / tree-vectorizer.c
blob: 6e647abb69438d358f2bbeba05c89cdfe26b3440
1 /* Vectorizer
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Loop and basic block vectorizer.
23 This file contains drivers for the three vectorizers:
24 (1) loop vectorizer (inter-iteration parallelism),
25 (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
26 vectorizer)
27 (3) BB vectorizer (out-of-loops), aka SLP
29 The rest of the vectorizer's code is organized as follows:
30 - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
31 used by drivers (1) and (2).
32 - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
33 drivers (1) and (2).
34 - tree-vect-slp.c - BB vectorization specific analysis and transformation,
35 used by drivers (2) and (3).
36 - tree-vect-stmts.c - statements analysis and transformation (used by all).
37 - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
38 manipulations (used by all).
39 - tree-vect-patterns.c - vectorizable code patterns detector (used by all)
41 Here's a poor attempt at illustrating that:
43 tree-vectorizer.c:
44 loop_vect() loop_aware_slp() slp_vect()
45 | / \ /
46 | / \ /
47 tree-vect-loop.c tree-vect-slp.c
48 | \ \ / / |
49 | \ \/ / |
50 | \ /\ / |
51 | \ / \ / |
52 tree-vect-stmts.c tree-vect-data-refs.c
53 \ /
54 tree-vect-patterns.c
57 #include "config.h"
58 #include "system.h"
59 #include "coretypes.h"
60 #include "backend.h"
61 #include "tree.h"
62 #include "gimple.h"
63 #include "predict.h"
64 #include "tree-pass.h"
65 #include "ssa.h"
66 #include "cgraph.h"
67 #include "fold-const.h"
68 #include "stor-layout.h"
69 #include "gimple-iterator.h"
70 #include "gimple-walk.h"
71 #include "tree-ssa-loop-manip.h"
72 #include "tree-ssa-loop-niter.h"
73 #include "tree-cfg.h"
74 #include "cfgloop.h"
75 #include "tree-vectorizer.h"
76 #include "tree-ssa-propagate.h"
77 #include "dbgcnt.h"
78 #include "tree-scalar-evolution.h"
79 #include "stringpool.h"
80 #include "attribs.h"
81 #include "gimple-pretty-print.h"
84 /* Loop or bb location, with hotness information. */
85 dump_user_location_t vect_location;
87 /* Vector mapping GIMPLE stmt to stmt_vec_info. */
88 vec<stmt_vec_info> *stmt_vec_info_vec;
90 /* Dump a cost entry according to args to F. */
92 void
93 dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
94 stmt_vec_info stmt_info, int misalign,
95 enum vect_cost_model_location where)
97 fprintf (f, "%p ", data);
98 if (stmt_info)
100 print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
101 fprintf (f, " ");
103 else
104 fprintf (f, "<unknown> ");
105 fprintf (f, "%d times ", count);
106 const char *ks = "unknown";
107 switch (kind)
109 case scalar_stmt:
110 ks = "scalar_stmt";
111 break;
112 case scalar_load:
113 ks = "scalar_load";
114 break;
115 case scalar_store:
116 ks = "scalar_store";
117 break;
118 case vector_stmt:
119 ks = "vector_stmt";
120 break;
121 case vector_load:
122 ks = "vector_load";
123 break;
124 case vector_gather_load:
125 ks = "vector_gather_load";
126 break;
127 case unaligned_load:
128 ks = "unaligned_load";
129 break;
130 case unaligned_store:
131 ks = "unaligned_store";
132 break;
133 case vector_store:
134 ks = "vector_store";
135 break;
136 case vector_scatter_store:
137 ks = "vector_scatter_store";
138 break;
139 case vec_to_scalar:
140 ks = "vec_to_scalar";
141 break;
142 case scalar_to_vec:
143 ks = "scalar_to_vec";
144 break;
145 case cond_branch_not_taken:
146 ks = "cond_branch_not_taken";
147 break;
148 case cond_branch_taken:
149 ks = "cond_branch_taken";
150 break;
151 case vec_perm:
152 ks = "vec_perm";
153 break;
154 case vec_promote_demote:
155 ks = "vec_promote_demote";
156 break;
157 case vec_construct:
158 ks = "vec_construct";
159 break;
161 fprintf (f, "%s ", ks);
162 if (kind == unaligned_load || kind == unaligned_store)
163 fprintf (f, "(misalign %d) ", misalign);
164 const char *ws = "unknown";
165 switch (where)
167 case vect_prologue:
168 ws = "prologue";
169 break;
170 case vect_body:
171 ws = "body";
172 break;
173 case vect_epilogue:
174 ws = "epilogue";
175 break;
177 fprintf (f, "in %s\n", ws);
180 /* For mapping simduid to vectorization factor. */
182 struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
184 unsigned int simduid;
185 poly_uint64 vf;
187 /* hash_table support. */
188 static inline hashval_t hash (const simduid_to_vf *);
189 static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
192 inline hashval_t
193 simduid_to_vf::hash (const simduid_to_vf *p)
195 return p->simduid;
198 inline int
199 simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
201 return p1->simduid == p2->simduid;
204 /* This hash maps the OMP simd array to the corresponding simduid used
205 to index into it, like this:
207 _7 = GOMP_SIMD_LANE (simduid.0)
210 D.1737[_7] = stuff;
213 This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
214 simduid.0. */
216 struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
218 tree decl;
219 unsigned int simduid;
221 /* hash_table support. */
222 static inline hashval_t hash (const simd_array_to_simduid *);
223 static inline int equal (const simd_array_to_simduid *,
224 const simd_array_to_simduid *);
227 inline hashval_t
228 simd_array_to_simduid::hash (const simd_array_to_simduid *p)
230 return DECL_UID (p->decl);
233 inline int
234 simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
235 const simd_array_to_simduid *p2)
237 return p1->decl == p2->decl;
240 /* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LAST_LANE,
241 into their corresponding constants and remove
242 IFN_GOMP_SIMD_ORDERED_{START,END}. */
244 static void
245 adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
247 basic_block bb;
249 FOR_EACH_BB_FN (bb, cfun)
251 gimple_stmt_iterator i;
253 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
255 poly_uint64 vf = 1;
256 enum internal_fn ifn;
257 gimple *stmt = gsi_stmt (i);
258 tree t;
259 if (!is_gimple_call (stmt)
260 || !gimple_call_internal_p (stmt))
262 gsi_next (&i);
263 continue;
265 ifn = gimple_call_internal_fn (stmt);
266 switch (ifn)
268 case IFN_GOMP_SIMD_LANE:
269 case IFN_GOMP_SIMD_VF:
270 case IFN_GOMP_SIMD_LAST_LANE:
271 break;
272 case IFN_GOMP_SIMD_ORDERED_START:
273 case IFN_GOMP_SIMD_ORDERED_END:
274 if (integer_onep (gimple_call_arg (stmt, 0)))
276 enum built_in_function bcode
277 = (ifn == IFN_GOMP_SIMD_ORDERED_START
278 ? BUILT_IN_GOMP_ORDERED_START
279 : BUILT_IN_GOMP_ORDERED_END);
280 gimple *g
281 = gimple_build_call (builtin_decl_explicit (bcode), 0);
282 tree vdef = gimple_vdef (stmt);
283 gimple_set_vdef (g, vdef);
284 SSA_NAME_DEF_STMT (vdef) = g;
285 gimple_set_vuse (g, gimple_vuse (stmt));
286 gsi_replace (&i, g, true);
287 continue;
289 gsi_remove (&i, true);
290 unlink_stmt_vdef (stmt);
291 continue;
292 default:
293 gsi_next (&i);
294 continue;
296 tree arg = gimple_call_arg (stmt, 0);
297 gcc_assert (arg != NULL_TREE);
298 gcc_assert (TREE_CODE (arg) == SSA_NAME);
299 simduid_to_vf *p = NULL, data;
300 data.simduid = DECL_UID (SSA_NAME_VAR (arg));
301 /* Need to nullify the loop safelen field since its value is no
302 longer valid after the transformation. */
303 if (bb->loop_father && bb->loop_father->safelen > 0)
304 bb->loop_father->safelen = 0;
305 if (htab)
307 p = htab->find (&data);
308 if (p)
309 vf = p->vf;
311 switch (ifn)
313 case IFN_GOMP_SIMD_VF:
314 t = build_int_cst (unsigned_type_node, vf);
315 break;
316 case IFN_GOMP_SIMD_LANE:
317 t = build_int_cst (unsigned_type_node, 0);
318 break;
319 case IFN_GOMP_SIMD_LAST_LANE:
320 t = gimple_call_arg (stmt, 1);
321 break;
322 default:
323 gcc_unreachable ();
325 tree lhs = gimple_call_lhs (stmt);
326 if (lhs)
327 replace_uses_by (lhs, t);
328 release_defs (stmt);
329 gsi_remove (&i, true);
334 /* Helper structure for note_simd_array_uses. */
336 struct note_simd_array_uses_struct
338 hash_table<simd_array_to_simduid> **htab;
339 unsigned int simduid;
342 /* Callback for note_simd_array_uses, called through walk_gimple_op. */
344 static tree
345 note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
347 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
348 struct note_simd_array_uses_struct *ns
349 = (struct note_simd_array_uses_struct *) wi->info;
351 if (TYPE_P (*tp))
352 *walk_subtrees = 0;
353 else if (VAR_P (*tp)
354 && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
355 && DECL_CONTEXT (*tp) == current_function_decl)
357 simd_array_to_simduid data;
358 if (!*ns->htab)
359 *ns->htab = new hash_table<simd_array_to_simduid> (15);
360 data.decl = *tp;
361 data.simduid = ns->simduid;
362 simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
363 if (*slot == NULL)
365 simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
366 *p = data;
367 *slot = p;
369 else if ((*slot)->simduid != ns->simduid)
370 (*slot)->simduid = -1U;
371 *walk_subtrees = 0;
373 return NULL_TREE;
376 /* Find "omp simd array" temporaries and map them to corresponding
377 simduid. */
379 static void
380 note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
382 basic_block bb;
383 gimple_stmt_iterator gsi;
384 struct walk_stmt_info wi;
385 struct note_simd_array_uses_struct ns;
387 memset (&wi, 0, sizeof (wi));
388 wi.info = &ns;
389 ns.htab = htab;
391 FOR_EACH_BB_FN (bb, cfun)
392 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
394 gimple *stmt = gsi_stmt (gsi);
395 if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
396 continue;
397 switch (gimple_call_internal_fn (stmt))
399 case IFN_GOMP_SIMD_LANE:
400 case IFN_GOMP_SIMD_VF:
401 case IFN_GOMP_SIMD_LAST_LANE:
402 break;
403 default:
404 continue;
406 tree lhs = gimple_call_lhs (stmt);
407 if (lhs == NULL_TREE)
408 continue;
409 imm_use_iterator use_iter;
410 gimple *use_stmt;
411 ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
412 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
413 if (!is_gimple_debug (use_stmt))
414 walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
418 /* Shrink arrays with "omp simd array" attribute to the corresponding
419 vectorization factor. */
421 static void
422 shrink_simd_arrays
423 (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
424 hash_table<simduid_to_vf> *simduid_to_vf_htab)
426 for (hash_table<simd_array_to_simduid>::iterator iter
427 = simd_array_to_simduid_htab->begin ();
428 iter != simd_array_to_simduid_htab->end (); ++iter)
429 if ((*iter)->simduid != -1U)
431 tree decl = (*iter)->decl;
432 poly_uint64 vf = 1;
433 if (simduid_to_vf_htab)
435 simduid_to_vf *p = NULL, data;
436 data.simduid = (*iter)->simduid;
437 p = simduid_to_vf_htab->find (&data);
438 if (p)
439 vf = p->vf;
441 tree atype
442 = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
443 TREE_TYPE (decl) = atype;
444 relayout_decl (decl);
447 delete simd_array_to_simduid_htab;
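
An illustrative example of the re-layout above; the array, element type, and sizes are hypothetical:

    /* If D.1737[] was created for simduid.0 (originally sized from the
       safelen bound, say float D.1737[16]) and that loop was vectorized
       with VF 8, build_array_type_nelts gives the decl the type float[8]
       and relayout_decl shrinks its storage accordingly.  */
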
450 /* Initialize the vec_info with kind KIND_IN and target cost data
451 TARGET_COST_DATA_IN. */
453 vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in,
454 vec_info_shared *shared_)
455 : kind (kind_in),
456 shared (shared_),
457 target_cost_data (target_cost_data_in)
459 stmt_vec_infos.create (50);
460 set_stmt_vec_info_vec (&stmt_vec_infos);
463 vec_info::~vec_info ()
465 slp_instance instance;
466 unsigned int i;
468 FOR_EACH_VEC_ELT (slp_instances, i, instance)
469 vect_free_slp_instance (instance, true);
471 destroy_cost_data (target_cost_data);
472 free_stmt_vec_infos (&stmt_vec_infos);
475 vec_info_shared::vec_info_shared ()
476 : datarefs (vNULL),
477 datarefs_copy (vNULL),
478 ddrs (vNULL)
482 vec_info_shared::~vec_info_shared ()
484 free_data_refs (datarefs);
485 free_dependence_relations (ddrs);
486 datarefs_copy.release ();
489 void
490 vec_info_shared::save_datarefs ()
492 if (!flag_checking)
493 return;
494 datarefs_copy.reserve_exact (datarefs.length ());
495 for (unsigned i = 0; i < datarefs.length (); ++i)
496 datarefs_copy.quick_push (*datarefs[i]);
499 void
500 vec_info_shared::check_datarefs ()
502 if (!flag_checking)
503 return;
504 gcc_assert (datarefs.length () == datarefs_copy.length ());
505 for (unsigned i = 0; i < datarefs.length (); ++i)
506 if (memcmp (&datarefs_copy[i], datarefs[i], sizeof (data_reference)) != 0)
507 gcc_unreachable ();
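
A note on the pair above (my reading of the code): save_datarefs snapshots every data_reference once analysis has filled it in, and check_datarefs verifies, under flag_checking, that a later analysis attempt sharing the same vec_info_shared did not mutate any of them in place.
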
510 /* Record that STMT belongs to the vectorizable region. Create and return
511 an associated stmt_vec_info. */
513 stmt_vec_info
514 vec_info::add_stmt (gimple *stmt)
516 stmt_vec_info res = new_stmt_vec_info (stmt, this);
517 set_vinfo_for_stmt (stmt, res);
518 return res;
521 /* If STMT has an associated stmt_vec_info, return that vec_info, otherwise
522 return null. It is safe to call this function on any statement, even if
523 it might not be part of the vectorizable region. */
525 stmt_vec_info
526 vec_info::lookup_stmt (gimple *stmt)
528 unsigned int uid = gimple_uid (stmt);
529 if (uid > 0 && uid - 1 < stmt_vec_infos.length ())
531 stmt_vec_info res = stmt_vec_infos[uid - 1];
532 if (res && res->stmt == stmt)
533 return res;
535 return NULL;
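
A minimal sketch of the uid invariant this lookup relies on, as I read set_vinfo_for_stmt: uids in the region encode the index into stmt_vec_infos plus one, and uid 0 means "no stmt_vec_info". The statement S and the assert are illustrative:

    /* stmt_vec_info info = vinfo->add_stmt (S);  // uid of S becomes index + 1
       gcc_checking_assert (vinfo->lookup_stmt (S) == info);
       // uid 0, or a uid whose slot holds a different statement,
       // makes lookup_stmt return NULL instead.  */
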
538 /* If NAME is an SSA_NAME and its definition has an associated stmt_vec_info,
539 return that stmt_vec_info, otherwise return null. It is safe to call
540 this on arbitrary operands. */
542 stmt_vec_info
543 vec_info::lookup_def (tree name)
545 if (TREE_CODE (name) == SSA_NAME
546 && !SSA_NAME_IS_DEFAULT_DEF (name))
547 return lookup_stmt (SSA_NAME_DEF_STMT (name));
548 return NULL;
551 /* See whether there is a single non-debug statement that uses LHS and
552 whether that statement has an associated stmt_vec_info. Return the
553 stmt_vec_info if so, otherwise return null. */
555 stmt_vec_info
556 vec_info::lookup_single_use (tree lhs)
558 use_operand_p dummy;
559 gimple *use_stmt;
560 if (single_imm_use (lhs, &dummy, &use_stmt))
561 return lookup_stmt (use_stmt);
562 return NULL;
565 /* Return vectorization information about DR. */
567 dr_vec_info *
568 vec_info::lookup_dr (data_reference *dr)
570 stmt_vec_info stmt_info = lookup_stmt (DR_STMT (dr));
571 /* DR_STMT should never refer to a stmt in a pattern replacement. */
572 gcc_checking_assert (!is_pattern_stmt_p (stmt_info));
573 return STMT_VINFO_DR_INFO (stmt_info->dr_aux.stmt);
576 /* Record that NEW_STMT_INFO now implements the same data reference
577 as OLD_STMT_INFO. */
579 void
580 vec_info::move_dr (stmt_vec_info new_stmt_info, stmt_vec_info old_stmt_info)
582 gcc_assert (!is_pattern_stmt_p (old_stmt_info));
583 STMT_VINFO_DR_INFO (old_stmt_info)->stmt = new_stmt_info;
584 new_stmt_info->dr_aux = old_stmt_info->dr_aux;
585 STMT_VINFO_DR_WRT_VEC_LOOP (new_stmt_info)
586 = STMT_VINFO_DR_WRT_VEC_LOOP (old_stmt_info);
587 STMT_VINFO_GATHER_SCATTER_P (new_stmt_info)
588 = STMT_VINFO_GATHER_SCATTER_P (old_stmt_info);
591 /* Permanently remove the statement described by STMT_INFO from the
592 function. */
594 void
595 vec_info::remove_stmt (stmt_vec_info stmt_info)
597 gcc_assert (!stmt_info->pattern_stmt_p);
598 gimple_stmt_iterator si = gsi_for_stmt (stmt_info->stmt);
599 unlink_stmt_vdef (stmt_info->stmt);
600 gsi_remove (&si, true);
601 release_defs (stmt_info->stmt);
602 free_stmt_vec_info (stmt_info);
605 /* Replace the statement at GSI by NEW_STMT, both the vectorization
606 information and the function itself. STMT_INFO describes the statement
607 at GSI. */
609 void
610 vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
611 gimple *new_stmt)
613 gimple *old_stmt = stmt_info->stmt;
614 gcc_assert (!stmt_info->pattern_stmt_p && old_stmt == gsi_stmt (*gsi));
615 set_vinfo_for_stmt (old_stmt, NULL);
616 set_vinfo_for_stmt (new_stmt, stmt_info);
617 stmt_info->stmt = new_stmt;
618 gsi_replace (gsi, new_stmt, true);
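
A usage sketch for the new member function; the replacement statement built here is hypothetical (new_rhs is a placeholder), callers pass whatever replacement they have constructed:

    /* gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
       gimple *new_stmt
         = gimple_build_assign (gimple_get_lhs (stmt_info->stmt), new_rhs);
       vinfo->replace_stmt (&gsi, stmt_info, new_stmt);
       // stmt_info->stmt now refers to new_stmt, the uid-based mapping
       // moved with it, and the old statement is gone from the IL.  */
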
621 /* A helper function to free scev and LOOP niter information, as well as
622 clear loop constraint LOOP_C_FINITE. */
624 void
625 vect_free_loop_info_assumptions (struct loop *loop)
627 scev_reset_htab ();
628 /* We need to explicitly reset upper bound information since it is
629 used even after free_numbers_of_iterations_estimates. */
630 loop->any_upper_bound = false;
631 loop->any_likely_upper_bound = false;
632 free_numbers_of_iterations_estimates (loop);
633 loop_constraint_clear (loop, LOOP_C_FINITE);
636 /* Return whether STMT is inside the region we try to vectorize. */
638 bool
639 vect_stmt_in_region_p (vec_info *vinfo, gimple *stmt)
641 if (!gimple_bb (stmt))
642 return false;
644 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
646 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
647 if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
648 return false;
650 else
652 bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
653 if (gimple_bb (stmt) != BB_VINFO_BB (bb_vinfo)
654 || gimple_uid (stmt) == -1U
655 || gimple_code (stmt) == GIMPLE_PHI)
656 return false;
659 return true;
663 /* If LOOP has been versioned during ifcvt, return the internal call
664 guarding it. */
666 static gimple *
667 vect_loop_vectorized_call (struct loop *loop)
669 basic_block bb = loop_preheader_edge (loop)->src;
670 gimple *g;
673 g = last_stmt (bb);
674 if (g)
675 break;
676 if (!single_pred_p (bb))
677 break;
678 bb = single_pred (bb);
680 while (1);
681 if (g && gimple_code (g) == GIMPLE_COND)
683 gimple_stmt_iterator gsi = gsi_for_stmt (g);
684 gsi_prev (&gsi);
685 if (!gsi_end_p (gsi))
687 g = gsi_stmt (gsi);
688 if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
689 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
690 || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
691 return g;
694 return NULL;
697 /* If LOOP has been versioned during loop distribution, return the guarding
698 internal call. */
700 static gimple *
701 vect_loop_dist_alias_call (struct loop *loop)
703 basic_block bb;
704 basic_block entry;
705 struct loop *outer, *orig;
706 gimple_stmt_iterator gsi;
707 gimple *g;
709 if (loop->orig_loop_num == 0)
710 return NULL;
712 orig = get_loop (cfun, loop->orig_loop_num);
713 if (orig == NULL)
715 /* The original loop has somehow been destroyed. Clear the information. */
716 loop->orig_loop_num = 0;
717 return NULL;
720 if (loop != orig)
721 bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
722 else
723 bb = loop_preheader_edge (loop)->src;
725 outer = bb->loop_father;
726 entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);
728 /* Look upward in dominance tree. */
729 for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
730 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
732 g = last_stmt (bb);
733 if (g == NULL || gimple_code (g) != GIMPLE_COND)
734 continue;
736 gsi = gsi_for_stmt (g);
737 gsi_prev (&gsi);
738 if (gsi_end_p (gsi))
739 continue;
741 g = gsi_stmt (gsi);
742 /* The guarding internal function call must have the same distribution
743 alias id. */
744 if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
745 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
746 return g;
748 return NULL;
751 /* Set the uids of all the statements in basic blocks inside loop
752 represented by LOOP_VINFO. LOOP_VECTORIZED_CALL is the internal
753 call guarding the loop that has been if-converted. */
754 static void
755 set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
757 tree arg = gimple_call_arg (loop_vectorized_call, 1);
758 basic_block *bbs;
759 unsigned int i;
760 struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
762 LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
763 gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
764 == loop_vectorized_call);
765 /* If we are going to vectorize the outer loop, prevent vectorization
766 of the inner loop in the scalar loop - either the scalar loop is
767 thrown away, in which case the work is wasted, or it is used only
768 for a few iterations. */
769 if (scalar_loop->inner)
771 gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
772 if (g)
774 arg = gimple_call_arg (g, 0);
775 get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
776 fold_loop_internal_call (g, boolean_false_node);
779 bbs = get_loop_body (scalar_loop);
780 for (i = 0; i < scalar_loop->num_nodes; i++)
782 basic_block bb = bbs[i];
783 gimple_stmt_iterator gsi;
784 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
786 gimple *phi = gsi_stmt (gsi);
787 gimple_set_uid (phi, 0);
789 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
791 gimple *stmt = gsi_stmt (gsi);
792 gimple_set_uid (stmt, 0);
795 free (bbs);
798 /* Try to vectorize LOOP. */
800 static unsigned
801 try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
802 unsigned *num_vectorized_loops,
803 loop_p loop, loop_vec_info orig_loop_vinfo,
804 gimple *loop_vectorized_call,
805 gimple *loop_dist_alias_call)
807 unsigned ret = 0;
808 vec_info_shared shared;
809 vect_location = find_loop_location (loop);
810 if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
811 && dump_enabled_p ())
812 dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
813 LOCATION_FILE (vect_location.get_location_t ()),
814 LOCATION_LINE (vect_location.get_location_t ()));
816 loop_vec_info loop_vinfo = vect_analyze_loop (loop, orig_loop_vinfo, &shared);
817 loop->aux = loop_vinfo;
819 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
821 /* Free existing information if loop is analyzed with some
822 assumptions. */
823 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
824 vect_free_loop_info_assumptions (loop);
826 /* If we applied if-conversion then try to vectorize the
827 BB of innermost loops.
828 ??? Ideally BB vectorization would learn to vectorize
829 control flow by applying if-conversion on-the-fly, the
830 following retains the if-converted loop body even when
831 only non-if-converted parts took part in BB vectorization. */
832 if (flag_tree_slp_vectorize != 0
833 && loop_vectorized_call
834 && ! loop->inner)
836 basic_block bb = loop->header;
837 bool has_mask_load_store = false;
838 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
839 !gsi_end_p (gsi); gsi_next (&gsi))
841 gimple *stmt = gsi_stmt (gsi);
842 if (is_gimple_call (stmt)
843 && gimple_call_internal_p (stmt)
844 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
845 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
847 has_mask_load_store = true;
848 break;
850 gimple_set_uid (stmt, -1);
851 gimple_set_visited (stmt, false);
853 if (! has_mask_load_store && vect_slp_bb (bb))
855 dump_printf_loc (MSG_NOTE, vect_location,
856 "basic block vectorized\n");
857 fold_loop_internal_call (loop_vectorized_call,
858 boolean_true_node);
859 loop_vectorized_call = NULL;
860 ret |= TODO_cleanup_cfg;
863 /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
864 loop, don't vectorize its inner loop; we'll attempt to
865 vectorize LOOP_VECTORIZED guarded inner loop of the scalar
866 loop version. */
867 if (loop_vectorized_call && loop->inner)
868 loop->inner->dont_vectorize = true;
869 return ret;
872 if (!dbg_cnt (vect_loop))
874 /* Free existing information if loop is analyzed with some
875 assumptions. */
876 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
877 vect_free_loop_info_assumptions (loop);
878 return ret;
881 if (loop_vectorized_call)
882 set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
884 unsigned HOST_WIDE_INT bytes;
885 if (current_vector_size.is_constant (&bytes))
886 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
887 "loop vectorized vectorized using "
888 HOST_WIDE_INT_PRINT_UNSIGNED " byte "
889 "vectors\n", bytes);
890 else
891 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
892 "loop vectorized using variable length vectors\n");
894 loop_p new_loop = vect_transform_loop (loop_vinfo);
895 (*num_vectorized_loops)++;
896 /* Now that the loop has been vectorized, allow it to be unrolled
897 etc. */
898 loop->force_vectorize = false;
900 if (loop->simduid)
902 simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
903 if (!simduid_to_vf_htab)
904 simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
905 simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
906 simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
907 *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
908 = simduid_to_vf_data;
911 if (loop_vectorized_call)
913 fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
914 loop_vectorized_call = NULL;
915 ret |= TODO_cleanup_cfg;
917 if (loop_dist_alias_call)
919 tree value = gimple_call_arg (loop_dist_alias_call, 1);
920 fold_loop_internal_call (loop_dist_alias_call, value);
921 loop_dist_alias_call = NULL;
922 ret |= TODO_cleanup_cfg;
925 /* Epilogue of vectorized loop must be vectorized too. */
926 if (new_loop)
927 ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
928 new_loop, loop_vinfo, NULL, NULL);
930 return ret;
933 /* Try to vectorize LOOP. */
935 static unsigned
936 try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
937 unsigned *num_vectorized_loops, loop_p loop)
939 if (!((flag_tree_loop_vectorize
940 && optimize_loop_nest_for_speed_p (loop))
941 || loop->force_vectorize))
942 return 0;
944 return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
945 loop, NULL,
946 vect_loop_vectorized_call (loop),
947 vect_loop_dist_alias_call (loop));
951 /* Function vectorize_loops.
953 Entry point to loop vectorization phase. */
955 unsigned
956 vectorize_loops (void)
958 unsigned int i;
959 unsigned int num_vectorized_loops = 0;
960 unsigned int vect_loops_num;
961 struct loop *loop;
962 hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
963 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
964 bool any_ifcvt_loops = false;
965 unsigned ret = 0;
967 vect_loops_num = number_of_loops (cfun);
969 /* Bail out if there are no loops. */
970 if (vect_loops_num <= 1)
971 return 0;
973 if (cfun->has_simduid_loops)
974 note_simd_array_uses (&simd_array_to_simduid_htab);
976 set_stmt_vec_info_vec (NULL);
978 /* ----------- Analyze loops. ----------- */
980 /* If some loop was duplicated, it gets a bigger number
981 than all previously defined loops. This fact allows us to run
982 only over initial loops skipping newly generated ones. */
983 FOR_EACH_LOOP (loop, 0)
984 if (loop->dont_vectorize)
986 any_ifcvt_loops = true;
987 /* If-conversion sometimes versions both the outer loop
988 (for the case when outer loop vectorization might be
989 desirable) as well as the inner loop in the scalar version
990 of the loop. So we have:
991 if (LOOP_VECTORIZED (1, 3))
993 loop1
994 loop2
996 else
997 loop3 (copy of loop1)
998 if (LOOP_VECTORIZED (4, 5))
999 loop4 (copy of loop2)
1000 else
1001 loop5 (copy of loop4)
1002 If FOR_EACH_LOOP gives us loop3 first (which has
1003 dont_vectorize set), make sure to process loop1 before loop4;
1004 so that we can prevent vectorization of loop4 if loop1
1005 is successfully vectorized. */
1006 if (loop->inner)
1008 gimple *loop_vectorized_call
1009 = vect_loop_vectorized_call (loop);
1010 if (loop_vectorized_call
1011 && vect_loop_vectorized_call (loop->inner))
1013 tree arg = gimple_call_arg (loop_vectorized_call, 0);
1014 struct loop *vector_loop
1015 = get_loop (cfun, tree_to_shwi (arg));
1016 if (vector_loop && vector_loop != loop)
1018 /* Make sure we don't vectorize it twice. */
1019 vector_loop->dont_vectorize = true;
1020 ret |= try_vectorize_loop (simduid_to_vf_htab,
1021 &num_vectorized_loops,
1022 vector_loop);
1027 else
1028 ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
1029 loop);
1031 vect_location = dump_user_location_t ();
1033 statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
1034 if (dump_enabled_p ())
1036 dump_printf_loc (MSG_NOTE, vect_location,
1037 "vectorized %u loops in function.\n",
1038 num_vectorized_loops);
1040 /* ----------- Finalize. ----------- */
1042 if (any_ifcvt_loops)
1043 for (i = 1; i < number_of_loops (cfun); i++)
1045 loop = get_loop (cfun, i);
1046 if (loop && loop->dont_vectorize)
1048 gimple *g = vect_loop_vectorized_call (loop);
1049 if (g)
1051 fold_loop_internal_call (g, boolean_false_node);
1052 ret |= TODO_cleanup_cfg;
1053 g = NULL;
1055 else
1056 g = vect_loop_dist_alias_call (loop);
1058 if (g)
1060 fold_loop_internal_call (g, boolean_false_node);
1061 ret |= TODO_cleanup_cfg;
1066 for (i = 1; i < number_of_loops (cfun); i++)
1068 loop_vec_info loop_vinfo;
1069 bool has_mask_store;
1071 loop = get_loop (cfun, i);
1072 if (!loop || !loop->aux)
1073 continue;
1074 loop_vinfo = (loop_vec_info) loop->aux;
1075 has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
1076 delete loop_vinfo;
1077 if (has_mask_store
1078 && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
1079 optimize_mask_stores (loop);
1080 loop->aux = NULL;
1083 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1084 if (cfun->has_simduid_loops)
1085 adjust_simduid_builtins (simduid_to_vf_htab);
1087 /* Shrink any "omp simd array" temporary arrays to the
1088 actual vectorization factors. */
1089 if (simd_array_to_simduid_htab)
1090 shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
1091 delete simduid_to_vf_htab;
1092 cfun->has_simduid_loops = false;
1094 if (num_vectorized_loops > 0)
1096 /* If we vectorized any loop, only virtual SSA form needs to be updated.
1097 ??? Also while we try hard to update loop-closed SSA form we fail
1098 to properly do this in some corner-cases (see PR56286). */
1099 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
1100 return TODO_cleanup_cfg;
1103 return ret;
1107 /* Entry point to the simduid cleanup pass. */
1109 namespace {
1111 const pass_data pass_data_simduid_cleanup =
1113 GIMPLE_PASS, /* type */
1114 "simduid", /* name */
1115 OPTGROUP_NONE, /* optinfo_flags */
1116 TV_NONE, /* tv_id */
1117 ( PROP_ssa | PROP_cfg ), /* properties_required */
1118 0, /* properties_provided */
1119 0, /* properties_destroyed */
1120 0, /* todo_flags_start */
1121 0, /* todo_flags_finish */
1124 class pass_simduid_cleanup : public gimple_opt_pass
1126 public:
1127 pass_simduid_cleanup (gcc::context *ctxt)
1128 : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
1131 /* opt_pass methods: */
1132 opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
1133 virtual bool gate (function *fun) { return fun->has_simduid_loops; }
1134 virtual unsigned int execute (function *);
1136 }; // class pass_simduid_cleanup
1138 unsigned int
1139 pass_simduid_cleanup::execute (function *fun)
1141 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
1143 note_simd_array_uses (&simd_array_to_simduid_htab);
1145 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
1146 adjust_simduid_builtins (NULL);
1148 /* Shrink any "omp simd array" temporary arrays to the
1149 actual vectorization factors. */
1150 if (simd_array_to_simduid_htab)
1151 shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
1152 fun->has_simduid_loops = false;
1153 return 0;
1156 } // anon namespace
1158 gimple_opt_pass *
1159 make_pass_simduid_cleanup (gcc::context *ctxt)
1161 return new pass_simduid_cleanup (ctxt);
1165 /* Entry point to basic block SLP phase. */
1167 namespace {
1169 const pass_data pass_data_slp_vectorize =
1171 GIMPLE_PASS, /* type */
1172 "slp", /* name */
1173 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1174 TV_TREE_SLP_VECTORIZATION, /* tv_id */
1175 ( PROP_ssa | PROP_cfg ), /* properties_required */
1176 0, /* properties_provided */
1177 0, /* properties_destroyed */
1178 0, /* todo_flags_start */
1179 TODO_update_ssa, /* todo_flags_finish */
1182 class pass_slp_vectorize : public gimple_opt_pass
1184 public:
1185 pass_slp_vectorize (gcc::context *ctxt)
1186 : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
1189 /* opt_pass methods: */
1190 opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
1191 virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
1192 virtual unsigned int execute (function *);
1194 }; // class pass_slp_vectorize
1196 unsigned int
1197 pass_slp_vectorize::execute (function *fun)
1199 basic_block bb;
1201 bool in_loop_pipeline = scev_initialized_p ();
1202 if (!in_loop_pipeline)
1204 loop_optimizer_init (LOOPS_NORMAL);
1205 scev_initialize ();
1208 /* Mark all stmts as not belonging to the current region and unvisited. */
1209 FOR_EACH_BB_FN (bb, fun)
1211 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1212 gsi_next (&gsi))
1214 gimple *stmt = gsi_stmt (gsi);
1215 gimple_set_uid (stmt, -1);
1216 gimple_set_visited (stmt, false);
1220 FOR_EACH_BB_FN (bb, fun)
1222 if (vect_slp_bb (bb))
1223 dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
1226 if (!in_loop_pipeline)
1228 scev_finalize ();
1229 loop_optimizer_finalize ();
1232 return 0;
1235 } // anon namespace
1237 gimple_opt_pass *
1238 make_pass_slp_vectorize (gcc::context *ctxt)
1240 return new pass_slp_vectorize (ctxt);
1244 /* Increase alignment of global arrays to improve vectorization potential.
1245 TODO:
1246 - Consider also structs that have an array field.
1247 - Use ipa analysis to prune arrays that can't be vectorized?
1248 This should involve global alignment analysis and in the future also
1249 array padding. */
1251 static unsigned get_vec_alignment_for_type (tree);
1252 static hash_map<tree, unsigned> *type_align_map;
1254 /* Return alignment of array's vector type corresponding to scalar type.
1255 0 if no vector type exists. */
1256 static unsigned
1257 get_vec_alignment_for_array_type (tree type)
1259 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1260 poly_uint64 array_size, vector_size;
1262 tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
1263 if (!vectype
1264 || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
1265 || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
1266 || maybe_lt (array_size, vector_size))
1267 return 0;
1269 return TYPE_ALIGN (vectype);
1272 /* Return alignment of field having maximum alignment of vector type
1273 corresponding to its scalar type. For now, we only consider fields whose
1274 offset is a multiple of its vector alignment.
1275 Return 0 if no suitable field is found. */
1276 static unsigned
1277 get_vec_alignment_for_record_type (tree type)
1279 gcc_assert (TREE_CODE (type) == RECORD_TYPE);
1281 unsigned max_align = 0, alignment;
1282 HOST_WIDE_INT offset;
1283 tree offset_tree;
1285 if (TYPE_PACKED (type))
1286 return 0;
1288 unsigned *slot = type_align_map->get (type);
1289 if (slot)
1290 return *slot;
1292 for (tree field = first_field (type);
1293 field != NULL_TREE;
1294 field = DECL_CHAIN (field))
1296 /* Skip if not FIELD_DECL or if alignment is set by user. */
1297 if (TREE_CODE (field) != FIELD_DECL
1298 || DECL_USER_ALIGN (field)
1299 || DECL_ARTIFICIAL (field))
1300 continue;
1302 /* We don't need to process the type further if offset is variable,
1303 since the offsets of remaining members will also be variable. */
1304 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
1305 || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
1306 break;
1308 /* Similarly stop processing the type if offset_tree
1309 does not fit in unsigned HOST_WIDE_INT. */
1310 offset_tree = bit_position (field);
1311 if (!tree_fits_uhwi_p (offset_tree))
1312 break;
1314 offset = tree_to_uhwi (offset_tree);
1315 alignment = get_vec_alignment_for_type (TREE_TYPE (field));
1317 /* Get maximum alignment of vectorized field/array among those members
1318 whose offset is multiple of the vector alignment. */
1319 if (alignment
1320 && (offset % alignment == 0)
1321 && (alignment > max_align))
1322 max_align = alignment;
1325 type_align_map->put (type, max_align);
1326 return max_align;
1329 /* Return alignment of vector type corresponding to decl's scalar type
1330 or 0 if it doesn't exist or the vector alignment is less than
1331 decl's alignment. */
1332 static unsigned
1333 get_vec_alignment_for_type (tree type)
1335 if (type == NULL_TREE)
1336 return 0;
1338 gcc_assert (TYPE_P (type));
1340 static unsigned alignment = 0;
1341 switch (TREE_CODE (type))
1343 case ARRAY_TYPE:
1344 alignment = get_vec_alignment_for_array_type (type);
1345 break;
1346 case RECORD_TYPE:
1347 alignment = get_vec_alignment_for_record_type (type);
1348 break;
1349 default:
1350 alignment = 0;
1351 break;
1354 return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
1357 /* Entry point to increase_alignment pass. */
1358 static unsigned int
1359 increase_alignment (void)
1361 varpool_node *vnode;
1363 vect_location = dump_user_location_t ();
1364 type_align_map = new hash_map<tree, unsigned>;
1366 /* Increase the alignment of all global arrays for vectorization. */
1367 FOR_EACH_DEFINED_VARIABLE (vnode)
1369 tree decl = vnode->decl;
1370 unsigned int alignment;
1372 if ((decl_in_symtab_p (decl)
1373 && !symtab_node::get (decl)->can_increase_alignment_p ())
1374 || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
1375 continue;
1377 alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
1378 if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
1380 vnode->increase_alignment (alignment);
1381 dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
1382 dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
1383 dump_printf (MSG_NOTE, "\n");
1387 delete type_align_map;
1388 return 0;
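
An illustrative effect of the pass; the decl, target vector size, and the resulting alignment are assumptions:

    /* static float big[1024];  // e.g. 4-byte default alignment
       // On a target with 256-bit vectors (V8SF), the functions above
       // return an alignment of 256 bits, and vnode->increase_alignment
       // raises DECL_ALIGN of 'big' so vector accesses to it can use
       // aligned loads and stores.  */
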
1392 namespace {
1394 const pass_data pass_data_ipa_increase_alignment =
1396 SIMPLE_IPA_PASS, /* type */
1397 "increase_alignment", /* name */
1398 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1399 TV_IPA_OPT, /* tv_id */
1400 0, /* properties_required */
1401 0, /* properties_provided */
1402 0, /* properties_destroyed */
1403 0, /* todo_flags_start */
1404 0, /* todo_flags_finish */
1407 class pass_ipa_increase_alignment : public simple_ipa_opt_pass
1409 public:
1410 pass_ipa_increase_alignment (gcc::context *ctxt)
1411 : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
1414 /* opt_pass methods: */
1415 virtual bool gate (function *)
1417 return flag_section_anchors && flag_tree_loop_vectorize;
1420 virtual unsigned int execute (function *) { return increase_alignment (); }
1422 }; // class pass_ipa_increase_alignment
1424 } // anon namespace
1426 simple_ipa_opt_pass *
1427 make_pass_ipa_increase_alignment (gcc::context *ctxt)
1429 return new pass_ipa_increase_alignment (ctxt);