/* Vectorizer
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)

  Here's a poor attempt at illustrating that:

     tree-vectorizer.c:
     loop_vect()  loop_aware_slp()  slp_vect()
          |        /           \          /
          |       /             \        /
          tree-vect-loop.c  tree-vect-slp.c
                | \      \  /      /   |
                |  \      \/      /    |
                |   \     /\     /     |
                |    \   /  \   /      |
         tree-vect-stmts.c  tree-vect-data-refs.c
                       \      /
                    tree-vect-patterns.c
*/
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "tree-scalar-evolution.h"
#include "stringpool.h"
#include "attribs.h"
#include "gimple-pretty-print.h"
/* Loop or bb location.  */
source_location vect_location;

/* Vector mapping GIMPLE stmt to stmt_vec_info.  */
vec<stmt_vec_info> *stmt_vec_info_vec;
/* Dump a cost entry according to args to F.  */
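/* As a sketch (pointer value and statement purely illustrative), one
   dumped entry reads

     0x38a8080 _4 = *_3 1 times vector_load in body

   i.e. the cost-data pointer, the statement (or "<unknown>"), the
   count, the cost kind, a "(misalign N)" note for unaligned accesses,
   and the part of the loop the cost was recorded against.  */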
void
dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
                stmt_vec_info stmt_info, int misalign,
                enum vect_cost_model_location where)
{
  fprintf (f, "%p ", data);
  if (stmt_info)
    {
      print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
      fprintf (f, " ");
    }
  else
    fprintf (f, "<unknown> ");
  fprintf (f, "%d times ", count);
  const char *ks = "unknown";
  switch (kind)
    {
    case scalar_stmt:
      ks = "scalar_stmt";
      break;
    case scalar_load:
      ks = "scalar_load";
      break;
    case scalar_store:
      ks = "scalar_store";
      break;
    case vector_stmt:
      ks = "vector_stmt";
      break;
    case vector_load:
      ks = "vector_load";
      break;
    case vector_gather_load:
      ks = "vector_gather_load";
      break;
    case unaligned_load:
      ks = "unaligned_load";
      break;
    case unaligned_store:
      ks = "unaligned_store";
      break;
    case vector_store:
      ks = "vector_store";
      break;
    case vector_scatter_store:
      ks = "vector_scatter_store";
      break;
    case vec_to_scalar:
      ks = "vec_to_scalar";
      break;
    case scalar_to_vec:
      ks = "scalar_to_vec";
      break;
    case cond_branch_not_taken:
      ks = "cond_branch_not_taken";
      break;
    case cond_branch_taken:
      ks = "cond_branch_taken";
      break;
    case vec_perm:
      ks = "vec_perm";
      break;
    case vec_promote_demote:
      ks = "vec_promote_demote";
      break;
    case vec_construct:
      ks = "vec_construct";
      break;
    }
  fprintf (f, "%s ", ks);
  if (kind == unaligned_load || kind == unaligned_store)
    fprintf (f, "(misalign %d) ", misalign);
  const char *ws = "unknown";
  switch (where)
    {
    case vect_prologue:
      ws = "prologue";
      break;
    case vect_body:
      ws = "body";
      break;
    case vect_epilogue:
      ws = "epilogue";
      break;
    }
  fprintf (f, "in %s\n", ws);
}
/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
{
  unsigned int simduid;
  poly_uint64 vf;

  /* hash_table support.  */
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}

/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like so,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */

struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
                           const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
                              const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}
/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants and remove
   IFN_GOMP_SIMD_ORDERED_{START,END}.  */
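/* As a sketch (SSA names and VF purely illustrative), with a recorded
   vectorization factor of 8 this turns

     _5 = GOMP_SIMD_VF (simduid.0)    into   _5 = 8
     _7 = GOMP_SIMD_LANE (simduid.0)  into   _7 = 0

   while IFN_GOMP_SIMD_ORDERED_{START,END} calls are either removed or,
   when their argument is 1, replaced by GOMP_ordered_{start,end}
   calls.  */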
static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
        {
          poly_uint64 vf = 1;
          enum internal_fn ifn;
          gimple *stmt = gsi_stmt (i);
          tree t;
          if (!is_gimple_call (stmt)
              || !gimple_call_internal_p (stmt))
            {
              gsi_next (&i);
              continue;
            }
          ifn = gimple_call_internal_fn (stmt);
          switch (ifn)
            {
            case IFN_GOMP_SIMD_LANE:
            case IFN_GOMP_SIMD_VF:
            case IFN_GOMP_SIMD_LAST_LANE:
              break;
            case IFN_GOMP_SIMD_ORDERED_START:
            case IFN_GOMP_SIMD_ORDERED_END:
              if (integer_onep (gimple_call_arg (stmt, 0)))
                {
                  enum built_in_function bcode
                    = (ifn == IFN_GOMP_SIMD_ORDERED_START
                       ? BUILT_IN_GOMP_ORDERED_START
                       : BUILT_IN_GOMP_ORDERED_END);
                  gimple *g
                    = gimple_build_call (builtin_decl_explicit (bcode), 0);
                  tree vdef = gimple_vdef (stmt);
                  gimple_set_vdef (g, vdef);
                  SSA_NAME_DEF_STMT (vdef) = g;
                  gimple_set_vuse (g, gimple_vuse (stmt));
                  gsi_replace (&i, g, true);
                  continue;
                }
              gsi_remove (&i, true);
              unlink_stmt_vdef (stmt);
              continue;
            default:
              gsi_next (&i);
              continue;
            }
          tree arg = gimple_call_arg (stmt, 0);
          gcc_assert (arg != NULL_TREE);
          gcc_assert (TREE_CODE (arg) == SSA_NAME);
          simduid_to_vf *p = NULL, data;
          data.simduid = DECL_UID (SSA_NAME_VAR (arg));
          /* Need to nullify loop safelen field since its value is not
             valid after transformation.  */
          if (bb->loop_father && bb->loop_father->safelen > 0)
            bb->loop_father->safelen = 0;
          if (htab)
            {
              p = htab->find (&data);
              if (p)
                vf = p->vf;
            }
          switch (ifn)
            {
            case IFN_GOMP_SIMD_VF:
              t = build_int_cst (unsigned_type_node, vf);
              break;
            case IFN_GOMP_SIMD_LANE:
              t = build_int_cst (unsigned_type_node, 0);
              break;
            case IFN_GOMP_SIMD_LAST_LANE:
              t = gimple_call_arg (stmt, 1);
              break;
            default:
              gcc_unreachable ();
            }
          tree lhs = gimple_call_lhs (stmt);
          if (lhs)
            replace_uses_by (lhs, t);
          release_defs (stmt);
          gsi_remove (&i, true);
        }
    }
}
/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};

/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (VAR_P (*tp)
           && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
           && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
        *ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
        {
          simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
          *p = data;
          *slot = p;
        }
      else if ((*slot)->simduid != ns->simduid)
        (*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
/* Find "omp simd array" temporaries and map them to corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple *stmt = gsi_stmt (gsi);
        if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
          continue;
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
          case IFN_GOMP_SIMD_VF:
          case IFN_GOMP_SIMD_LAST_LANE:
            break;
          default:
            continue;
          }
        tree lhs = gimple_call_lhs (stmt);
        if (lhs == NULL_TREE)
          continue;
        imm_use_iterator use_iter;
        gimple *use_stmt;
        ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
        FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
          if (!is_gimple_debug (use_stmt))
            walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */
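/* As a sketch (array and VF purely illustrative), an "omp simd array"
   D.1737 created with 64 elements for a loop that was vectorized with
   VF 8 is re-laid out as an 8-element array; arrays belonging to loops
   that were not vectorized shrink to a single element, since VF
   defaults to 1 below.  */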
static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
         = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
        tree decl = (*iter)->decl;
        poly_uint64 vf = 1;
        if (simduid_to_vf_htab)
          {
            simduid_to_vf *p = NULL, data;
            data.simduid = (*iter)->simduid;
            p = simduid_to_vf_htab->find (&data);
            if (p)
              vf = p->vf;
          }
        tree atype
          = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
        TREE_TYPE (decl) = atype;
        relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}
/* Initialize the vec_info with kind KIND_IN and target cost data
   TARGET_COST_DATA_IN.  */

vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in)
  : kind (kind_in),
    datarefs (vNULL),
    ddrs (vNULL),
    target_cost_data (target_cost_data_in)
{
  stmt_vec_infos.create (50);
  set_stmt_vec_info_vec (&stmt_vec_infos);
}

vec_info::~vec_info ()
{
  slp_instance instance;
  struct data_reference *dr;
  unsigned int i;

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (dr->aux)
      {
        free (dr->aux);
        dr->aux = NULL;
      }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);

  free_data_refs (datarefs);
  free_dependence_relations (ddrs);
  destroy_cost_data (target_cost_data);
  free_stmt_vec_infos (&stmt_vec_infos);
}
/* A helper function to free scev and LOOP niter information, as well as
   clear loop constraint LOOP_C_FINITE.  */

void
vect_free_loop_info_assumptions (struct loop *loop)
{
  scev_reset_htab ();
  /* We need to explicitly reset upper bound information since it is
     used even after free_numbers_of_iterations_estimates.  */
  loop->any_upper_bound = false;
  loop->any_likely_upper_bound = false;
  free_numbers_of_iterations_estimates (loop);
  loop_constraint_clear (loop, LOOP_C_FINITE);
}
/* Return whether STMT is inside the region we try to vectorize.  */

bool
vect_stmt_in_region_p (vec_info *vinfo, gimple *stmt)
{
  if (!gimple_bb (stmt))
    return false;

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
      if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
        return false;
    }
  else
    {
      bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
      if (gimple_bb (stmt) != BB_VINFO_BB (bb_vinfo)
          || gimple_uid (stmt) == -1U
          || gimple_code (stmt) == GIMPLE_PHI)
        return false;
    }

  return true;
}
/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */
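/* As a sketch (loop numbers illustrative), if-conversion versions a
   loop like

     if (LOOP_VECTORIZED (2, 3))
       <loop 2, if-converted body>
     else
       <loop 3, scalar copy>

   and this function returns the IFN_LOOP_VECTORIZED call found right
   before the GIMPLE_COND ending the block dominating LOOP's
   preheader.  */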
static gimple *
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple *g;
  do
    {
      g = last_stmt (bb);
      if (g)
        break;
      if (!single_pred_p (bb))
        break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
        {
          g = gsi_stmt (gsi);
          if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
              && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
                  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
            return g;
        }
    }
  return NULL;
}
/* If LOOP has been versioned during loop distribution, return the guarding
   internal call.  */
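/* As a sketch (id illustrative), loop distribution versions a loop
   under a runtime alias check

     if (LOOP_DIST_ALIAS (5, <alias check>))
       <distributed loops>
     else
       <original loop, with orig_loop_num == 5>

   and this function walks up the dominance tree looking for the
   IFN_LOOP_DIST_ALIAS call whose first argument matches LOOP's
   orig_loop_num.  */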
static gimple *
vect_loop_dist_alias_call (struct loop *loop)
{
  basic_block bb;
  basic_block entry;
  struct loop *outer, *orig;
  gimple_stmt_iterator gsi;
  gimple *g;

  if (loop->orig_loop_num == 0)
    return NULL;

  orig = get_loop (cfun, loop->orig_loop_num);
  if (orig == NULL)
    {
      /* The original loop is somehow destroyed.  Clear the information.  */
      loop->orig_loop_num = 0;
      return NULL;
    }

  if (loop != orig)
    bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
  else
    bb = loop_preheader_edge (loop)->src;

  outer = bb->loop_father;
  entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  /* Look upward in dominance tree.  */
  for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
    {
      g = last_stmt (bb);
      if (g == NULL || gimple_code (g) != GIMPLE_COND)
        continue;

      gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (gsi_end_p (gsi))
        continue;

      g = gsi_stmt (gsi);
      /* The guarding internal function call must have the same distribution
         alias id.  */
      if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
          && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
        return g;
    }

  return NULL;
}
/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if converted.  */
static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
                       == loop_vectorized_call);
  /* If we are going to vectorize outer loop, prevent vectorization
     of the inner loop in the scalar loop - either the scalar loop is
     thrown away, so it is a wasted work, or is used only for
     a few iterations.  */
  if (scalar_loop->inner)
    {
      gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
      if (g)
        {
          arg = gimple_call_arg (g, 0);
          get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
          fold_loop_internal_call (g, boolean_false_node);
        }
    }
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *phi = gsi_stmt (gsi);
          gimple_set_uid (phi, 0);
        }
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, 0);
        }
    }
  free (bbs);
}
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop_1 (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
                      unsigned *num_vectorized_loops,
                      loop_p loop, loop_vec_info orig_loop_vinfo,
                      gimple *loop_vectorized_call,
                      gimple *loop_dist_alias_call)
{
  unsigned ret = 0;
  vect_location = find_loop_location (loop);
  if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
      && dump_enabled_p ())
    dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
                 LOCATION_FILE (vect_location),
                 LOCATION_LINE (vect_location));

  loop_vec_info loop_vinfo = vect_analyze_loop (loop, orig_loop_vinfo);
  loop->aux = loop_vinfo;

  if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
    {
      /* Free existing information if loop is analyzed with some
         assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
        vect_free_loop_info_assumptions (loop);

      /* If we applied if-conversion then try to vectorize the
         BB of innermost loops.
         ??? Ideally BB vectorization would learn to vectorize
         control flow by applying if-conversion on-the-fly, the
         following retains the if-converted loop body even when
         only non-if-converted parts took part in BB vectorization.  */
      if (flag_tree_slp_vectorize != 0
          && loop_vectorized_call
          && ! loop->inner)
        {
          basic_block bb = loop->header;
          bool has_mask_load_store = false;
          for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
               !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple *stmt = gsi_stmt (gsi);
              if (is_gimple_call (stmt)
                  && gimple_call_internal_p (stmt)
                  && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
                      || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
                {
                  has_mask_load_store = true;
                  break;
                }
              gimple_set_uid (stmt, -1);
              gimple_set_visited (stmt, false);
            }
          if (! has_mask_load_store && vect_slp_bb (bb))
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "basic block vectorized\n");
              fold_loop_internal_call (loop_vectorized_call,
                                       boolean_true_node);
              loop_vectorized_call = NULL;
              ret |= TODO_cleanup_cfg;
            }
        }
      /* If outer loop vectorization fails for LOOP_VECTORIZED guarded
         loop, don't vectorize its inner loop; we'll attempt to
         vectorize LOOP_VECTORIZED guarded inner loop of the scalar
         loop version.  */
      if (loop_vectorized_call && loop->inner)
        loop->inner->dont_vectorize = true;
      return ret;
    }

  if (!dbg_cnt (vect_loop))
    {
      /* Free existing information if loop is analyzed with some
         assumptions.  */
      if (loop_constraint_set_p (loop, LOOP_C_FINITE))
        vect_free_loop_info_assumptions (loop);
      return ret;
    }

  if (loop_vectorized_call)
    set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
  unsigned HOST_WIDE_INT bytes;
  if (current_vector_size.is_constant (&bytes))
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                     "loop vectorized using "
                     HOST_WIDE_INT_PRINT_UNSIGNED " byte "
                     "vectors\n", bytes);
  else
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                     "loop vectorized using variable length vectors\n");
  loop_p new_loop = vect_transform_loop (loop_vinfo);
  (*num_vectorized_loops)++;
  /* Now that the loop has been vectorized, allow it to be unrolled
     etc.  */
  loop->force_vectorize = false;

  if (loop->simduid)
    {
      simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
      if (!simduid_to_vf_htab)
        simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
      simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
      simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
      *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
        = simduid_to_vf_data;
    }

  if (loop_vectorized_call)
    {
      fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
      loop_vectorized_call = NULL;
      ret |= TODO_cleanup_cfg;
    }
  if (loop_dist_alias_call)
    {
      tree value = gimple_call_arg (loop_dist_alias_call, 1);
      fold_loop_internal_call (loop_dist_alias_call, value);
      loop_dist_alias_call = NULL;
      ret |= TODO_cleanup_cfg;
    }

  /* Epilogue of vectorized loop must be vectorized too.  */
  if (new_loop)
    ret |= try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
                                 new_loop, loop_vinfo, NULL, NULL);

  return ret;
}
/* Try to vectorize LOOP.  */

static unsigned
try_vectorize_loop (hash_table<simduid_to_vf> *&simduid_to_vf_htab,
                    unsigned *num_vectorized_loops, loop_p loop)
{
  if (!((flag_tree_loop_vectorize
         && optimize_loop_nest_for_speed_p (loop))
        || loop->force_vectorize))
    return 0;

  return try_vectorize_loop_1 (simduid_to_vf_htab, num_vectorized_loops,
                               loop, NULL,
                               vect_loop_vectorized_call (loop),
                               vect_loop_dist_alias_call (loop));
}
/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  set_stmt_vec_info_vec (NULL);

  /* ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      {
        any_ifcvt_loops = true;
        /* If-conversion sometimes versions both the outer loop
           (for the case when outer loop vectorization might be
           desirable) as well as the inner loop in the scalar version
           of the loop.  So we have:
             if (LOOP_VECTORIZED (1, 3))
               {
                 loop1
                   loop2
               }
             else
               loop3 (copy of loop1)
                 if (LOOP_VECTORIZED (4, 5))
                   loop4 (copy of loop2)
                 else
                   loop5 (copy of loop4)
           If FOR_EACH_LOOP gives us loop3 first (which has
           dont_vectorize set), make sure to process loop1 before loop4;
           so that we can prevent vectorization of loop4 if loop1
           is successfully vectorized.  */
        if (loop->inner)
          {
            gimple *loop_vectorized_call
              = vect_loop_vectorized_call (loop);
            if (loop_vectorized_call
                && vect_loop_vectorized_call (loop->inner))
              {
                tree arg = gimple_call_arg (loop_vectorized_call, 0);
                struct loop *vector_loop
                  = get_loop (cfun, tree_to_shwi (arg));
                if (vector_loop && vector_loop != loop)
                  {
                    /* Make sure we don't vectorize it twice.  */
                    vector_loop->dont_vectorize = true;
                    ret |= try_vectorize_loop (simduid_to_vf_htab,
                                               &num_vectorized_loops,
                                               vector_loop);
                  }
              }
          }
      }
    else
      ret |= try_vectorize_loop (simduid_to_vf_htab, &num_vectorized_loops,
                                 loop);
  vect_location = UNKNOWN_LOCATION;

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorized %u loops in function.\n",
                     num_vectorized_loops);
  /* ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < vect_loops_num; i++)
      {
        loop = get_loop (cfun, i);
        if (loop && loop->dont_vectorize)
          {
            gimple *g = vect_loop_vectorized_call (loop);
            if (g)
              {
                fold_loop_internal_call (g, boolean_false_node);
                ret |= TODO_cleanup_cfg;
                g = NULL;
              }
            else
              g = vect_loop_dist_alias_call (loop);

            if (g)
              {
                fold_loop_internal_call (g, boolean_false_node);
                ret |= TODO_cleanup_cfg;
              }
          }
      }

  for (i = 1; i < number_of_loops (cfun); i++)
    {
      loop_vec_info loop_vinfo;
      bool has_mask_store;

      loop = get_loop (cfun, i);
      if (!loop || !loop->aux)
        continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
      delete loop_vinfo;
      if (has_mask_store
          && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
        optimize_mask_stores (loop);
      loop->aux = NULL;
    }
  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;
  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
         ??? Also while we try hard to update loop-closed SSA form we fail
         to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}
/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}
/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  /* Mark all stmts as not belonging to the current region and unvisited.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, -1);
          gimple_set_visited (stmt, false);
        }
    }

  FOR_EACH_BB_FN (bb, fun)
    {
      if (vect_slp_bb (bb))
        dump_printf_loc (MSG_NOTE, vect_location, "basic block vectorized\n");
    }

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}
/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */
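/* As a sketch (types and numbers illustrative), a global

     static float a[1024];

   normally gets the alignment of float, but raising it to the
   alignment of the target's float vector type (say 16 bytes for V4SF)
   lets loops over it use aligned vector accesses.  */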
static unsigned get_vec_alignment_for_type (tree);
static hash_map<tree, unsigned> *type_align_map;
/* Return alignment of array's vector type corresponding to scalar type.
   0 if no vector type exists.  */
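/* For instance (sizes illustrative): for int[100] with vector type
   V4SI this returns TYPE_ALIGN (V4SI), while for int[2], which is
   smaller than one vector, it returns 0.  */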
static unsigned
get_vec_alignment_for_array_type (tree type)
{
  gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
  poly_uint64 array_size, vector_size;

  tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
  if (!vectype
      || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
      || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
      || maybe_lt (array_size, vector_size))
    return 0;

  return TYPE_ALIGN (vectype);
}
/* Return alignment of the field having the maximum alignment of the vector
   type corresponding to its scalar type.  For now, we only consider fields
   whose offset is a multiple of its vector alignment.
   0 if no suitable field is found.  */
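/* For instance (layout illustrative), in

     struct S { float a[256]; int tail; };

   the field A at offset 0 can contribute the alignment of the float
   vector type, whereas a field starting at, say, byte offset 4 would
   be skipped because 4 is not a multiple of a 16-byte vector
   alignment.  */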
static unsigned
get_vec_alignment_for_record_type (tree type)
{
  gcc_assert (TREE_CODE (type) == RECORD_TYPE);

  unsigned max_align = 0, alignment;
  HOST_WIDE_INT offset;
  tree offset_tree;

  if (TYPE_PACKED (type))
    return 0;

  unsigned *slot = type_align_map->get (type);
  if (slot)
    return *slot;

  for (tree field = first_field (type);
       field != NULL_TREE;
       field = DECL_CHAIN (field))
    {
      /* Skip if not FIELD_DECL or if alignment is set by user.  */
      if (TREE_CODE (field) != FIELD_DECL
          || DECL_USER_ALIGN (field)
          || DECL_ARTIFICIAL (field))
        continue;

      /* We don't need to process the type further if offset is variable,
         since the offsets of remaining members will also be variable.  */
      if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
          || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
        break;

      /* Similarly stop processing the type if offset_tree
         does not fit in unsigned HOST_WIDE_INT.  */
      offset_tree = bit_position (field);
      if (!tree_fits_uhwi_p (offset_tree))
        break;

      offset = tree_to_uhwi (offset_tree);
      alignment = get_vec_alignment_for_type (TREE_TYPE (field));

      /* Get maximum alignment of vectorized field/array among those members
         whose offset is multiple of the vector alignment.  */
      if (alignment
          && (offset % alignment == 0)
          && (alignment > max_align))
        max_align = alignment;
    }

  type_align_map->put (type, max_align);
  return max_align;
}
/* Return alignment of vector type corresponding to decl's scalar type
   or 0 if it doesn't exist or the vector alignment is less than
   decl's alignment.  */
static unsigned
get_vec_alignment_for_type (tree type)
{
  if (type == NULL_TREE)
    return 0;

  gcc_assert (TYPE_P (type));

  static unsigned alignment = 0;
  switch (TREE_CODE (type))
    {
    case ARRAY_TYPE:
      alignment = get_vec_alignment_for_array_type (type);
      break;
    case RECORD_TYPE:
      alignment = get_vec_alignment_for_record_type (type);
      break;
    default:
      alignment = 0;
      break;
    }

  return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
}
/* Entry point to increase_alignment pass.  */
static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = UNKNOWN_LOCATION;
  type_align_map = new hash_map<tree, unsigned>;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree decl = vnode->decl;
      unsigned int alignment;

      if ((decl_in_symtab_p (decl)
           && !symtab_node::get (decl)->can_increase_alignment_p ())
          || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
        continue;

      alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
      if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
        {
          vnode->increase_alignment (alignment);
          dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
          dump_printf (MSG_NOTE, "\n");
        }
    }

  delete type_align_map;
  return 0;
}
namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return flag_section_anchors && flag_tree_loop_vectorize;
  }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

} // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}