gcc/tree-vectorizer.c
1 /* Vectorizer
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 /* Loop and basic block vectorizer.
23 This file contains drivers for the three vectorizers:
24 (1) loop vectorizer (inter-iteration parallelism),
25 (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
26 vectorizer)
27 (3) BB vectorizer (out-of-loops), aka SLP
29 The rest of the vectorizer's code is organized as follows:
30 - tree-vect-loop.c - loop specific parts such as reductions, etc. These are
31 used by drivers (1) and (2).
32 - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
33 drivers (1) and (2).
34 - tree-vect-slp.c - BB vectorization specific analysis and transformation,
35 used by drivers (2) and (3).
36 - tree-vect-stmts.c - statements analysis and transformation (used by all).
37 - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
38 manipulations (used by all).
39 - tree-vect-patterns.c - vectorizable code patterns detector (used by all)
41 Here's a poor attempt at illustrating that:
43 tree-vectorizer.c:
44 loop_vect() loop_aware_slp() slp_vect()
45 | / \ /
46 | / \ /
47 tree-vect-loop.c tree-vect-slp.c
48 | \ \ / / |
49 | \ \/ / |
50 | \ /\ / |
51 | \ / \ / |
52 tree-vect-stmts.c tree-vect-data-refs.c
53 \ /
54 tree-vect-patterns.c
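/* As a rough illustration of what driver (1) does, a scalar loop such as

       for (i = 0; i < n; i++)
         a[i] = b[i] + c[i];

   is, assuming a vectorization factor of 4, rewritten to operate on
   four elements per iteration:

       for (i = 0; i < n; i += 4)
         a[i:i+4] = b[i:i+4] + c[i:i+4];

   with scalar prologue/epilogue loops as needed for alignment and
   remainder iterations.  This is schematic pseudo-code only; the actual
   transformation is performed on GIMPLE.  */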
57 #include "config.h"
58 #include "system.h"
59 #include "coretypes.h"
60 #include "backend.h"
61 #include "tree.h"
62 #include "gimple.h"
63 #include "predict.h"
64 #include "tree-pass.h"
65 #include "ssa.h"
66 #include "cgraph.h"
67 #include "fold-const.h"
68 #include "stor-layout.h"
69 #include "gimple-iterator.h"
70 #include "gimple-walk.h"
71 #include "tree-ssa-loop-manip.h"
72 #include "tree-ssa-loop-niter.h"
73 #include "tree-cfg.h"
74 #include "cfgloop.h"
75 #include "tree-vectorizer.h"
76 #include "tree-ssa-propagate.h"
77 #include "dbgcnt.h"
78 #include "tree-scalar-evolution.h"
79 #include "stringpool.h"
80 #include "attribs.h"
81 #include "gimple-pretty-print.h"
84 /* Loop or bb location. */
85 source_location vect_location;
87 /* Vector mapping GIMPLE stmt to stmt_vec_info. */
88 vec<stmt_vec_info> stmt_vec_info_vec;
90 /* Dump a cost entry, described by the remaining arguments, to F. */
92 void
93 dump_stmt_cost (FILE *f, void *data, int count, enum vect_cost_for_stmt kind,
94 stmt_vec_info stmt_info, int misalign,
95 enum vect_cost_model_location where)
97 fprintf (f, "%p ", data);
98 if (stmt_info)
100 print_gimple_expr (f, STMT_VINFO_STMT (stmt_info), 0, TDF_SLIM);
101 fprintf (f, " ");
103 else
104 fprintf (f, "<unknown> ");
105 fprintf (f, "%d times ", count);
106 const char *ks = "unknown";
107 switch (kind)
109 case scalar_stmt:
110 ks = "scalar_stmt";
111 break;
112 case scalar_load:
113 ks = "scalar_load";
114 break;
115 case scalar_store:
116 ks = "scalar_store";
117 break;
118 case vector_stmt:
119 ks = "vector_stmt";
120 break;
121 case vector_load:
122 ks = "vector_load";
123 break;
124 case vector_gather_load:
125 ks = "vector_gather_load";
126 break;
127 case unaligned_load:
128 ks = "unaligned_load";
129 break;
130 case unaligned_store:
131 ks = "unaligned_store";
132 break;
133 case vector_store:
134 ks = "vector_store";
135 break;
136 case vector_scatter_store:
137 ks = "vector_scatter_store";
138 break;
139 case vec_to_scalar:
140 ks = "vec_to_scalar";
141 break;
142 case scalar_to_vec:
143 ks = "scalar_to_vec";
144 break;
145 case cond_branch_not_taken:
146 ks = "cond_branch_not_taken";
147 break;
148 case cond_branch_taken:
149 ks = "cond_branch_taken";
150 break;
151 case vec_perm:
152 ks = "vec_perm";
153 break;
154 case vec_promote_demote:
155 ks = "vec_promote_demote";
156 break;
157 case vec_construct:
158 ks = "vec_construct";
159 break;
161 fprintf (f, "%s ", ks);
162 if (kind == unaligned_load || kind == unaligned_store)
163 fprintf (f, "(misalign %d) ", misalign);
164 const char *ws = "unknown";
165 switch (where)
167 case vect_prologue:
168 ws = "prologue";
169 break;
170 case vect_body:
171 ws = "body";
172 break;
173 case vect_epilogue:
174 ws = "epilogue";
175 break;
177 fprintf (f, "in %s\n", ws);
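/* For example, a single vector load in the loop body would be printed by
   dump_stmt_cost above roughly as (pointer value and statement invented
   here for illustration):

     0x2a4fd80 _5 = *p_4 1 times vector_load in body

   with a "(misalign N)" field appearing only for unaligned accesses.  */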
180 /* For mapping simduid to vectorization factor. */
182 struct simduid_to_vf : free_ptr_hash<simduid_to_vf>
184 unsigned int simduid;
185 poly_uint64 vf;
187 /* hash_table support. */
188 static inline hashval_t hash (const simduid_to_vf *);
189 static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
192 inline hashval_t
193 simduid_to_vf::hash (const simduid_to_vf *p)
195 return p->simduid;
198 inline int
199 simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
201 return p1->simduid == p2->simduid;
204 /* This hash maps the OMP simd array to the corresponding simduid used
205 to index into it, like this:
207 _7 = GOMP_SIMD_LANE (simduid.0)
210 D.1737[_7] = stuff;
213 The map goes from the OMP simd array (D.1737[]) to the DECL_UID of
214 simduid.0. */
216 struct simd_array_to_simduid : free_ptr_hash<simd_array_to_simduid>
218 tree decl;
219 unsigned int simduid;
221 /* hash_table support. */
222 static inline hashval_t hash (const simd_array_to_simduid *);
223 static inline int equal (const simd_array_to_simduid *,
224 const simd_array_to_simduid *);
227 inline hashval_t
228 simd_array_to_simduid::hash (const simd_array_to_simduid *p)
230 return DECL_UID (p->decl);
233 inline int
234 simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
235 const simd_array_to_simduid *p2)
237 return p1->decl == p2->decl;
240 /* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
241 into their corresponding constants and remove
242 IFN_GOMP_SIMD_ORDERED_{START,END}. */
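/* For instance, once a simd loop has been vectorized with a
   vectorization factor of 8 recorded in HTAB, calls such as
   (illustrative GIMPLE)

     _5 = GOMP_SIMD_VF (simduid.0);
     _6 = GOMP_SIMD_LANE (simduid.0);

   are folded below to the constants 8 and 0 respectively, and all uses
   of their lhs are replaced by those constants.  */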
244 static void
245 adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
247 basic_block bb;
249 FOR_EACH_BB_FN (bb, cfun)
251 gimple_stmt_iterator i;
253 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
255 poly_uint64 vf = 1;
256 enum internal_fn ifn;
257 gimple *stmt = gsi_stmt (i);
258 tree t;
259 if (!is_gimple_call (stmt)
260 || !gimple_call_internal_p (stmt))
262 gsi_next (&i);
263 continue;
265 ifn = gimple_call_internal_fn (stmt);
266 switch (ifn)
268 case IFN_GOMP_SIMD_LANE:
269 case IFN_GOMP_SIMD_VF:
270 case IFN_GOMP_SIMD_LAST_LANE:
271 break;
272 case IFN_GOMP_SIMD_ORDERED_START:
273 case IFN_GOMP_SIMD_ORDERED_END:
274 if (integer_onep (gimple_call_arg (stmt, 0)))
276 enum built_in_function bcode
277 = (ifn == IFN_GOMP_SIMD_ORDERED_START
278 ? BUILT_IN_GOMP_ORDERED_START
279 : BUILT_IN_GOMP_ORDERED_END);
280 gimple *g
281 = gimple_build_call (builtin_decl_explicit (bcode), 0);
282 tree vdef = gimple_vdef (stmt);
283 gimple_set_vdef (g, vdef);
284 SSA_NAME_DEF_STMT (vdef) = g;
285 gimple_set_vuse (g, gimple_vuse (stmt));
286 gsi_replace (&i, g, true);
287 continue;
289 gsi_remove (&i, true);
290 unlink_stmt_vdef (stmt);
291 continue;
292 default:
293 gsi_next (&i);
294 continue;
296 tree arg = gimple_call_arg (stmt, 0);
297 gcc_assert (arg != NULL_TREE);
298 gcc_assert (TREE_CODE (arg) == SSA_NAME);
299 simduid_to_vf *p = NULL, data;
300 data.simduid = DECL_UID (SSA_NAME_VAR (arg));
301 /* Need to nullify the loop safelen field since its value is not
302 valid after the transformation. */
303 if (bb->loop_father && bb->loop_father->safelen > 0)
304 bb->loop_father->safelen = 0;
305 if (htab)
307 p = htab->find (&data);
308 if (p)
309 vf = p->vf;
311 switch (ifn)
313 case IFN_GOMP_SIMD_VF:
314 t = build_int_cst (unsigned_type_node, vf);
315 break;
316 case IFN_GOMP_SIMD_LANE:
317 t = build_int_cst (unsigned_type_node, 0);
318 break;
319 case IFN_GOMP_SIMD_LAST_LANE:
320 t = gimple_call_arg (stmt, 1);
321 break;
322 default:
323 gcc_unreachable ();
325 tree lhs = gimple_call_lhs (stmt);
326 if (lhs)
327 replace_uses_by (lhs, t);
328 release_defs (stmt);
329 gsi_remove (&i, true);
334 /* Helper structure for note_simd_array_uses. */
336 struct note_simd_array_uses_struct
338 hash_table<simd_array_to_simduid> **htab;
339 unsigned int simduid;
342 /* Callback for note_simd_array_uses, called through walk_gimple_op. */
344 static tree
345 note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
347 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
348 struct note_simd_array_uses_struct *ns
349 = (struct note_simd_array_uses_struct *) wi->info;
351 if (TYPE_P (*tp))
352 *walk_subtrees = 0;
353 else if (VAR_P (*tp)
354 && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
355 && DECL_CONTEXT (*tp) == current_function_decl)
357 simd_array_to_simduid data;
358 if (!*ns->htab)
359 *ns->htab = new hash_table<simd_array_to_simduid> (15);
360 data.decl = *tp;
361 data.simduid = ns->simduid;
362 simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
363 if (*slot == NULL)
365 simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
366 *p = data;
367 *slot = p;
369 else if ((*slot)->simduid != ns->simduid)
370 (*slot)->simduid = -1U;
371 *walk_subtrees = 0;
373 return NULL_TREE;
376 /* Find "omp simd array" temporaries and map them to corresponding
377 simduid. */
379 static void
380 note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
382 basic_block bb;
383 gimple_stmt_iterator gsi;
384 struct walk_stmt_info wi;
385 struct note_simd_array_uses_struct ns;
387 memset (&wi, 0, sizeof (wi));
388 wi.info = &ns;
389 ns.htab = htab;
391 FOR_EACH_BB_FN (bb, cfun)
392 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
394 gimple *stmt = gsi_stmt (gsi);
395 if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
396 continue;
397 switch (gimple_call_internal_fn (stmt))
399 case IFN_GOMP_SIMD_LANE:
400 case IFN_GOMP_SIMD_VF:
401 case IFN_GOMP_SIMD_LAST_LANE:
402 break;
403 default:
404 continue;
406 tree lhs = gimple_call_lhs (stmt);
407 if (lhs == NULL_TREE)
408 continue;
409 imm_use_iterator use_iter;
410 gimple *use_stmt;
411 ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
412 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
413 if (!is_gimple_debug (use_stmt))
414 walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
418 /* Shrink arrays with "omp simd array" attribute to the corresponding
419 vectorization factor. */
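/* For example, an "omp simd array" temporary that was created with a
   worst-case element count, say D.1737[16], is re-laid out here as
   D.1737[8] when the recorded vectorization factor for its simduid is 8
   (names and numbers illustrative).  */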
421 static void
422 shrink_simd_arrays
423 (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
424 hash_table<simduid_to_vf> *simduid_to_vf_htab)
426 for (hash_table<simd_array_to_simduid>::iterator iter
427 = simd_array_to_simduid_htab->begin ();
428 iter != simd_array_to_simduid_htab->end (); ++iter)
429 if ((*iter)->simduid != -1U)
431 tree decl = (*iter)->decl;
432 poly_uint64 vf = 1;
433 if (simduid_to_vf_htab)
435 simduid_to_vf *p = NULL, data;
436 data.simduid = (*iter)->simduid;
437 p = simduid_to_vf_htab->find (&data);
438 if (p)
439 vf = p->vf;
441 tree atype
442 = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
443 TREE_TYPE (decl) = atype;
444 relayout_decl (decl);
447 delete simd_array_to_simduid_htab;
450 /* Initialize the vec_info with kind KIND_IN and target cost data
451 TARGET_COST_DATA_IN. */
453 vec_info::vec_info (vec_info::vec_kind kind_in, void *target_cost_data_in)
454 : kind (kind_in),
455 datarefs (vNULL),
456 ddrs (vNULL),
457 target_cost_data (target_cost_data_in)
461 vec_info::~vec_info ()
463 slp_instance instance;
464 struct data_reference *dr;
465 unsigned int i;
467 FOR_EACH_VEC_ELT (datarefs, i, dr)
468 if (dr->aux)
470 free (dr->aux);
471 dr->aux = NULL;
474 FOR_EACH_VEC_ELT (slp_instances, i, instance)
475 vect_free_slp_instance (instance);
477 free_data_refs (datarefs);
478 free_dependence_relations (ddrs);
479 destroy_cost_data (target_cost_data);
482 /* A helper function to free scev and LOOP niter information, as well as
483 clear loop constraint LOOP_C_FINITE. */
485 void
486 vect_free_loop_info_assumptions (struct loop *loop)
488 scev_reset_htab ();
489 /* We need to explicitly reset upper bound information since it is
490 used even after free_numbers_of_iterations_estimates. */
491 loop->any_upper_bound = false;
492 loop->any_likely_upper_bound = false;
493 free_numbers_of_iterations_estimates (loop);
494 loop_constraint_clear (loop, LOOP_C_FINITE);
497 /* Return whether STMT is inside the region we try to vectorize. */
499 bool
500 vect_stmt_in_region_p (vec_info *vinfo, gimple *stmt)
502 if (!gimple_bb (stmt))
503 return false;
505 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
507 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
508 if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
509 return false;
511 else
513 bb_vec_info bb_vinfo = as_a <bb_vec_info> (vinfo);
514 if (gimple_bb (stmt) != BB_VINFO_BB (bb_vinfo)
515 || gimple_uid (stmt) == -1U
516 || gimple_code (stmt) == GIMPLE_PHI)
517 return false;
520 return true;
524 /* If LOOP has been versioned during ifcvt, return the internal call
525 guarding it. */
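/* After if-conversion the CFG looks roughly like

       if (LOOP_VECTORIZED (1, 2))
         loop1  (the if-converted copy, to be vectorized)
       else
         loop2  (the scalar copy)

   so walking up through single predecessors from LOOP's preheader finds
   the GIMPLE_COND whose immediately preceding statement is that internal
   call (loop numbers illustrative).  */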
527 static gimple *
528 vect_loop_vectorized_call (struct loop *loop)
530 basic_block bb = loop_preheader_edge (loop)->src;
531 gimple *g;
534 g = last_stmt (bb);
535 if (g)
536 break;
537 if (!single_pred_p (bb))
538 break;
539 bb = single_pred (bb);
541 while (1);
542 if (g && gimple_code (g) == GIMPLE_COND)
544 gimple_stmt_iterator gsi = gsi_for_stmt (g);
545 gsi_prev (&gsi);
546 if (!gsi_end_p (gsi))
548 g = gsi_stmt (gsi);
549 if (gimple_call_internal_p (g, IFN_LOOP_VECTORIZED)
550 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
551 || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
552 return g;
555 return NULL;
558 /* If LOOP has been versioned during loop distribution, return the guarding
559 internal call. */
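/* Schematically, loop distribution versions a loop under a runtime
   alias check,

       if (LOOP_DIST_ALIAS (orig_loop_num, <runtime alias test>))
         one loop version
       else
         the other version

   and this function walks dominators upward from LOOP to recover that
   guarding call; the call is later folded to its second argument once
   the vectorizer is done with it (see vectorize_loops below).  */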
561 static gimple *
562 vect_loop_dist_alias_call (struct loop *loop)
564 basic_block bb;
565 basic_block entry;
566 struct loop *outer, *orig;
567 gimple_stmt_iterator gsi;
568 gimple *g;
570 if (loop->orig_loop_num == 0)
571 return NULL;
573 orig = get_loop (cfun, loop->orig_loop_num);
574 if (orig == NULL)
576 /* The original loop has somehow been destroyed. Clear the information. */
577 loop->orig_loop_num = 0;
578 return NULL;
581 if (loop != orig)
582 bb = nearest_common_dominator (CDI_DOMINATORS, loop->header, orig->header);
583 else
584 bb = loop_preheader_edge (loop)->src;
586 outer = bb->loop_father;
587 entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);
589 /* Look upward in dominance tree. */
590 for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
591 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
593 g = last_stmt (bb);
594 if (g == NULL || gimple_code (g) != GIMPLE_COND)
595 continue;
597 gsi = gsi_for_stmt (g);
598 gsi_prev (&gsi);
599 if (gsi_end_p (gsi))
600 continue;
602 g = gsi_stmt (gsi);
603 /* The guarding internal function call must have the same distribution
604 alias id. */
605 if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)
606 && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->orig_loop_num))
607 return g;
609 return NULL;
612 /* Set the uids of all the statements in basic blocks inside the loop
613 represented by LOOP_VINFO. LOOP_VECTORIZED_CALL is the internal
614 call guarding the loop which has been if-converted. */
615 static void
616 set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
618 tree arg = gimple_call_arg (loop_vectorized_call, 1);
619 basic_block *bbs;
620 unsigned int i;
621 struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
623 LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
624 gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
625 == loop_vectorized_call);
626 /* If we are going to vectorize the outer loop, prevent vectorization
627 of the inner loop in the scalar loop - either the scalar loop is
628 thrown away, so vectorizing it would be wasted work, or it is used
629 only for a few iterations. */
630 if (scalar_loop->inner)
632 gimple *g = vect_loop_vectorized_call (scalar_loop->inner);
633 if (g)
635 arg = gimple_call_arg (g, 0);
636 get_loop (cfun, tree_to_shwi (arg))->dont_vectorize = true;
637 fold_loop_internal_call (g, boolean_false_node);
640 bbs = get_loop_body (scalar_loop);
641 for (i = 0; i < scalar_loop->num_nodes; i++)
643 basic_block bb = bbs[i];
644 gimple_stmt_iterator gsi;
645 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
647 gimple *phi = gsi_stmt (gsi);
648 gimple_set_uid (phi, 0);
650 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
652 gimple *stmt = gsi_stmt (gsi);
653 gimple_set_uid (stmt, 0);
656 free (bbs);
659 /* Function vectorize_loops.
661 Entry point to loop vectorization phase. */
663 unsigned
664 vectorize_loops (void)
666 unsigned int i;
667 unsigned int num_vectorized_loops = 0;
668 unsigned int vect_loops_num;
669 struct loop *loop;
670 hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
671 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
672 bool any_ifcvt_loops = false;
673 unsigned ret = 0;
674 struct loop *new_loop;
676 vect_loops_num = number_of_loops (cfun);
678 /* Bail out if there are no loops. */
679 if (vect_loops_num <= 1)
680 return 0;
682 if (cfun->has_simduid_loops)
683 note_simd_array_uses (&simd_array_to_simduid_htab);
685 init_stmt_vec_info_vec ();
687 /* ----------- Analyze loops. ----------- */
689 /* If some loop was duplicated, it gets a bigger number
690 than all previously defined loops. This fact allows us to run
691 only over the initial loops, skipping newly generated ones. */
692 FOR_EACH_LOOP (loop, 0)
693 if (loop->dont_vectorize)
695 any_ifcvt_loops = true;
696 /* If-conversion sometimes versions both the outer loop
697 (for the case when outer loop vectorization might be
698 desirable) and the inner loop in the scalar version
699 of the loop. So we have:
700 if (LOOP_VECTORIZED (1, 3))
702 loop1
703 loop2
705 else
706 loop3 (copy of loop1)
707 if (LOOP_VECTORIZED (4, 5))
708 loop4 (copy of loop2)
709 else
710 loop5 (copy of loop4)
711 If FOR_EACH_LOOP gives us loop3 first (which has
712 dont_vectorize set), make sure to process loop1 before loop4;
713 so that we can prevent vectorization of loop4 if loop1
714 is successfully vectorized. */
715 if (loop->inner)
717 gimple *loop_vectorized_call
718 = vect_loop_vectorized_call (loop);
719 if (loop_vectorized_call
720 && vect_loop_vectorized_call (loop->inner))
722 tree arg = gimple_call_arg (loop_vectorized_call, 0);
723 struct loop *vector_loop
724 = get_loop (cfun, tree_to_shwi (arg));
725 if (vector_loop && vector_loop != loop)
727 loop = vector_loop;
728 /* Make sure we don't vectorize it twice. */
729 loop->dont_vectorize = true;
730 goto try_vectorize;
735 else
737 loop_vec_info loop_vinfo, orig_loop_vinfo;
738 gimple *loop_vectorized_call, *loop_dist_alias_call;
739 try_vectorize:
740 if (!((flag_tree_loop_vectorize
741 && optimize_loop_nest_for_speed_p (loop))
742 || loop->force_vectorize))
743 continue;
744 orig_loop_vinfo = NULL;
745 loop_vectorized_call = vect_loop_vectorized_call (loop);
746 loop_dist_alias_call = vect_loop_dist_alias_call (loop);
747 vectorize_epilogue:
748 vect_location = find_loop_location (loop);
749 if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
750 && dump_enabled_p ())
751 dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
752 LOCATION_FILE (vect_location),
753 LOCATION_LINE (vect_location));
755 loop_vinfo = vect_analyze_loop (loop, orig_loop_vinfo);
756 loop->aux = loop_vinfo;
758 if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
760 /* Free existing information if loop is analyzed with some
761 assumptions. */
762 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
763 vect_free_loop_info_assumptions (loop);
765 /* If we applied if-conversion then try to vectorize the
766 BB of innermost loops.
767 ??? Ideally BB vectorization would learn to vectorize
768 control flow by applying if-conversion on-the-fly; the
769 following retains the if-converted loop body even when
770 only non-if-converted parts took part in BB vectorization. */
771 if (flag_tree_slp_vectorize != 0
772 && loop_vectorized_call
773 && ! loop->inner)
775 basic_block bb = loop->header;
776 bool has_mask_load_store = false;
777 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
778 !gsi_end_p (gsi); gsi_next (&gsi))
780 gimple *stmt = gsi_stmt (gsi);
781 if (is_gimple_call (stmt)
782 && gimple_call_internal_p (stmt)
783 && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
784 || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
786 has_mask_load_store = true;
787 break;
789 gimple_set_uid (stmt, -1);
790 gimple_set_visited (stmt, false);
792 if (! has_mask_load_store && vect_slp_bb (bb))
794 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
795 "basic block vectorized\n");
796 fold_loop_internal_call (loop_vectorized_call,
797 boolean_true_node);
798 loop_vectorized_call = NULL;
799 ret |= TODO_cleanup_cfg;
802 /* If outer loop vectorization fails for a LOOP_VECTORIZED-guarded
803 loop, don't vectorize its inner loop; we'll attempt to
804 vectorize the LOOP_VECTORIZED-guarded inner loop of the scalar
805 loop version instead. */
806 if (loop_vectorized_call && loop->inner)
807 loop->inner->dont_vectorize = true;
808 continue;
811 if (!dbg_cnt (vect_loop))
813 /* We may miss some if-converted loops due to
814 the debug counter. Set any_ifcvt_loops to visit
815 them at finalization. */
816 any_ifcvt_loops = true;
817 /* Free existing information if loop is analyzed with some
818 assumptions. */
819 if (loop_constraint_set_p (loop, LOOP_C_FINITE))
820 vect_free_loop_info_assumptions (loop);
822 break;
825 if (loop_vectorized_call)
826 set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
827 if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
828 && dump_enabled_p ())
829 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
830 "loop vectorized\n");
831 new_loop = vect_transform_loop (loop_vinfo);
832 num_vectorized_loops++;
833 /* Now that the loop has been vectorized, allow it to be unrolled
834 etc. */
835 loop->force_vectorize = false;
837 if (loop->simduid)
839 simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
840 if (!simduid_to_vf_htab)
841 simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
842 simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
843 simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
844 *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
845 = simduid_to_vf_data;
848 if (loop_vectorized_call)
850 fold_loop_internal_call (loop_vectorized_call, boolean_true_node);
851 loop_vectorized_call = NULL;
852 ret |= TODO_cleanup_cfg;
854 if (loop_dist_alias_call)
856 tree value = gimple_call_arg (loop_dist_alias_call, 1);
857 fold_loop_internal_call (loop_dist_alias_call, value);
858 loop_dist_alias_call = NULL;
859 ret |= TODO_cleanup_cfg;
862 if (new_loop)
864 /* Epilogue of vectorized loop must be vectorized too. */
865 vect_loops_num = number_of_loops (cfun);
866 loop = new_loop;
867 orig_loop_vinfo = loop_vinfo; /* To pass to vect_analyze_loop. */
868 goto vectorize_epilogue;
872 vect_location = UNKNOWN_LOCATION;
874 statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
875 if (dump_enabled_p ())
877 dump_printf_loc (MSG_NOTE, vect_location,
878 "vectorized %u loops in function.\n",
879 num_vectorized_loops);
881 /* ----------- Finalize. ----------- */
883 if (any_ifcvt_loops)
884 for (i = 1; i < vect_loops_num; i++)
886 loop = get_loop (cfun, i);
887 if (loop && loop->dont_vectorize)
889 gimple *g = vect_loop_vectorized_call (loop);
890 if (g)
892 fold_loop_internal_call (g, boolean_false_node);
893 ret |= TODO_cleanup_cfg;
894 g = NULL;
896 else
897 g = vect_loop_dist_alias_call (loop);
899 if (g)
901 fold_loop_internal_call (g, boolean_false_node);
902 ret |= TODO_cleanup_cfg;
907 for (i = 1; i < vect_loops_num; i++)
909 loop_vec_info loop_vinfo;
910 bool has_mask_store;
912 loop = get_loop (cfun, i);
913 if (!loop)
914 continue;
915 loop_vinfo = (loop_vec_info) loop->aux;
916 has_mask_store = false;
917 if (loop_vinfo)
918 has_mask_store = LOOP_VINFO_HAS_MASK_STORE (loop_vinfo);
919 delete loop_vinfo;
920 if (has_mask_store
921 && targetm.vectorize.empty_mask_is_expensive (IFN_MASK_STORE))
922 optimize_mask_stores (loop);
923 loop->aux = NULL;
926 free_stmt_vec_info_vec ();
928 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
929 if (cfun->has_simduid_loops)
930 adjust_simduid_builtins (simduid_to_vf_htab);
932 /* Shrink any "omp simd array" temporary arrays to the
933 actual vectorization factors. */
934 if (simd_array_to_simduid_htab)
935 shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
936 delete simduid_to_vf_htab;
937 cfun->has_simduid_loops = false;
939 if (num_vectorized_loops > 0)
941 /* If we vectorized any loop, only virtual SSA form needs to be updated.
942 ??? Also while we try hard to update loop-closed SSA form we fail
943 to properly do this in some corner-cases (see PR56286). */
944 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
945 return TODO_cleanup_cfg;
948 return ret;
952 /* Entry point to the simduid cleanup pass. */
954 namespace {
956 const pass_data pass_data_simduid_cleanup =
958 GIMPLE_PASS, /* type */
959 "simduid", /* name */
960 OPTGROUP_NONE, /* optinfo_flags */
961 TV_NONE, /* tv_id */
962 ( PROP_ssa | PROP_cfg ), /* properties_required */
963 0, /* properties_provided */
964 0, /* properties_destroyed */
965 0, /* todo_flags_start */
966 0, /* todo_flags_finish */
969 class pass_simduid_cleanup : public gimple_opt_pass
971 public:
972 pass_simduid_cleanup (gcc::context *ctxt)
973 : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
976 /* opt_pass methods: */
977 opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
978 virtual bool gate (function *fun) { return fun->has_simduid_loops; }
979 virtual unsigned int execute (function *);
981 }; // class pass_simduid_cleanup
983 unsigned int
984 pass_simduid_cleanup::execute (function *fun)
986 hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
988 note_simd_array_uses (&simd_array_to_simduid_htab);
990 /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE,ORDERED_{START,END}} builtins. */
991 adjust_simduid_builtins (NULL);
993 /* Shrink any "omp simd array" temporary arrays to the
994 actual vectorization factors. */
995 if (simd_array_to_simduid_htab)
996 shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
997 fun->has_simduid_loops = false;
998 return 0;
1001 } // anon namespace
1003 gimple_opt_pass *
1004 make_pass_simduid_cleanup (gcc::context *ctxt)
1006 return new pass_simduid_cleanup (ctxt);
1010 /* Entry point to basic block SLP phase. */
1012 namespace {
1014 const pass_data pass_data_slp_vectorize =
1016 GIMPLE_PASS, /* type */
1017 "slp", /* name */
1018 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1019 TV_TREE_SLP_VECTORIZATION, /* tv_id */
1020 ( PROP_ssa | PROP_cfg ), /* properties_required */
1021 0, /* properties_provided */
1022 0, /* properties_destroyed */
1023 0, /* todo_flags_start */
1024 TODO_update_ssa, /* todo_flags_finish */
1027 class pass_slp_vectorize : public gimple_opt_pass
1029 public:
1030 pass_slp_vectorize (gcc::context *ctxt)
1031 : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
1034 /* opt_pass methods: */
1035 opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
1036 virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
1037 virtual unsigned int execute (function *);
1039 }; // class pass_slp_vectorize
1041 unsigned int
1042 pass_slp_vectorize::execute (function *fun)
1044 basic_block bb;
1046 bool in_loop_pipeline = scev_initialized_p ();
1047 if (!in_loop_pipeline)
1049 loop_optimizer_init (LOOPS_NORMAL);
1050 scev_initialize ();
1053 /* Mark all stmts as not belonging to the current region and unvisited. */
1054 FOR_EACH_BB_FN (bb, fun)
1056 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
1057 gsi_next (&gsi))
1059 gimple *stmt = gsi_stmt (gsi);
1060 gimple_set_uid (stmt, -1);
1061 gimple_set_visited (stmt, false);
1065 init_stmt_vec_info_vec ();
1067 FOR_EACH_BB_FN (bb, fun)
1069 if (vect_slp_bb (bb))
1070 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
1071 "basic block vectorized\n");
1074 free_stmt_vec_info_vec ();
1076 if (!in_loop_pipeline)
1078 scev_finalize ();
1079 loop_optimizer_finalize ();
1082 return 0;
1085 } // anon namespace
1087 gimple_opt_pass *
1088 make_pass_slp_vectorize (gcc::context *ctxt)
1090 return new pass_slp_vectorize (ctxt);
1094 /* Increase alignment of global arrays to improve vectorization potential.
1095 TODO:
1096 - Consider also structs that have an array field.
1097 - Use ipa analysis to prune arrays that can't be vectorized?
1098 This should involve global alignment analysis and in the future also
1099 array padding. */
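/* For example, with 256-bit vectors a file-scope array like

       static float a[1024];

   which by default gets only the alignment of float, can have its
   alignment raised to that of the vector type (32 bytes here) so that
   vectorized loops over it can use aligned accesses; the exact numbers
   depend on the target.  */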
1101 static unsigned get_vec_alignment_for_type (tree);
1102 static hash_map<tree, unsigned> *type_align_map;
1104 /* Return the alignment of the vector type corresponding to the array's
1105 scalar element type, or 0 if no such vector type exists. */
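/* E.g. for int a[100] with a 128-bit vector int type, the array size
   (3200 bits) is at least the vector size (128 bits), so the alignment
   of the vector type is returned; a smaller array such as int a[2]
   would yield 0 (illustrative numbers).  */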
1106 static unsigned
1107 get_vec_alignment_for_array_type (tree type)
1109 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1110 poly_uint64 array_size, vector_size;
1112 tree vectype = get_vectype_for_scalar_type (strip_array_types (type));
1113 if (!vectype
1114 || !poly_int_tree_p (TYPE_SIZE (type), &array_size)
1115 || !poly_int_tree_p (TYPE_SIZE (vectype), &vector_size)
1116 || maybe_lt (array_size, vector_size))
1117 return 0;
1119 return TYPE_ALIGN (vectype);
1122 /* Return the alignment of the field having the maximum alignment of the
1123 vector type corresponding to its scalar type. For now, we only consider
1124 fields whose offset is a multiple of its vector alignment.
1125 Return 0 if no suitable field is found. */
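/* E.g. given struct { double d[32]; int i; }, the field d might map to a
   vector type aligned to 128 bits; since d sits at offset 0, a multiple
   of that alignment, 128 would be the result, while the scalar field i
   contributes nothing (illustrative numbers).  */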
1126 static unsigned
1127 get_vec_alignment_for_record_type (tree type)
1129 gcc_assert (TREE_CODE (type) == RECORD_TYPE);
1131 unsigned max_align = 0, alignment;
1132 HOST_WIDE_INT offset;
1133 tree offset_tree;
1135 if (TYPE_PACKED (type))
1136 return 0;
1138 unsigned *slot = type_align_map->get (type);
1139 if (slot)
1140 return *slot;
1142 for (tree field = first_field (type);
1143 field != NULL_TREE;
1144 field = DECL_CHAIN (field))
1146 /* Skip if not FIELD_DECL or if alignment is set by user. */
1147 if (TREE_CODE (field) != FIELD_DECL
1148 || DECL_USER_ALIGN (field)
1149 || DECL_ARTIFICIAL (field))
1150 continue;
1152 /* We don't need to process the type further if offset is variable,
1153 since the offsets of remaining members will also be variable. */
1154 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST
1155 || TREE_CODE (DECL_FIELD_BIT_OFFSET (field)) != INTEGER_CST)
1156 break;
1158 /* Similarly stop processing the type if offset_tree
1159 does not fit in unsigned HOST_WIDE_INT. */
1160 offset_tree = bit_position (field);
1161 if (!tree_fits_uhwi_p (offset_tree))
1162 break;
1164 offset = tree_to_uhwi (offset_tree);
1165 alignment = get_vec_alignment_for_type (TREE_TYPE (field));
1167 /* Get maximum alignment of vectorized field/array among those members
1168 whose offset is multiple of the vector alignment. */
1169 if (alignment
1170 && (offset % alignment == 0)
1171 && (alignment > max_align))
1172 max_align = alignment;
1175 type_align_map->put (type, max_align);
1176 return max_align;
1179 /* Return the alignment of the vector type corresponding to TYPE's scalar
1180 type, or 0 if it doesn't exist or if the vector alignment is less than
1181 TYPE's alignment. */
1182 static unsigned
1183 get_vec_alignment_for_type (tree type)
1185 if (type == NULL_TREE)
1186 return 0;
1188 gcc_assert (TYPE_P (type));
1190 unsigned alignment = 0;
1191 switch (TREE_CODE (type))
1193 case ARRAY_TYPE:
1194 alignment = get_vec_alignment_for_array_type (type);
1195 break;
1196 case RECORD_TYPE:
1197 alignment = get_vec_alignment_for_record_type (type);
1198 break;
1199 default:
1200 alignment = 0;
1201 break;
1204 return (alignment > TYPE_ALIGN (type)) ? alignment : 0;
1207 /* Entry point to increase_alignment pass. */
1208 static unsigned int
1209 increase_alignment (void)
1211 varpool_node *vnode;
1213 vect_location = UNKNOWN_LOCATION;
1214 type_align_map = new hash_map<tree, unsigned>;
1216 /* Increase the alignment of all global arrays for vectorization. */
1217 FOR_EACH_DEFINED_VARIABLE (vnode)
1219 tree decl = vnode->decl;
1220 unsigned int alignment;
1222 if ((decl_in_symtab_p (decl)
1223 && !symtab_node::get (decl)->can_increase_alignment_p ())
1224 || DECL_USER_ALIGN (decl) || DECL_ARTIFICIAL (decl))
1225 continue;
1227 alignment = get_vec_alignment_for_type (TREE_TYPE (decl));
1228 if (alignment && vect_can_force_dr_alignment_p (decl, alignment))
1230 vnode->increase_alignment (alignment);
1231 dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
1232 dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
1233 dump_printf (MSG_NOTE, "\n");
1237 delete type_align_map;
1238 return 0;
1242 namespace {
1244 const pass_data pass_data_ipa_increase_alignment =
1246 SIMPLE_IPA_PASS, /* type */
1247 "increase_alignment", /* name */
1248 OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
1249 TV_IPA_OPT, /* tv_id */
1250 0, /* properties_required */
1251 0, /* properties_provided */
1252 0, /* properties_destroyed */
1253 0, /* todo_flags_start */
1254 0, /* todo_flags_finish */
1257 class pass_ipa_increase_alignment : public simple_ipa_opt_pass
1259 public:
1260 pass_ipa_increase_alignment (gcc::context *ctxt)
1261 : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
1264 /* opt_pass methods: */
1265 virtual bool gate (function *)
1267 return flag_section_anchors && flag_tree_loop_vectorize;
1270 virtual unsigned int execute (function *) { return increase_alignment (); }
1272 }; // class pass_ipa_increase_alignment
1274 } // anon namespace
1276 simple_ipa_opt_pass *
1277 make_pass_ipa_increase_alignment (gcc::context *ctxt)
1279 return new pass_ipa_increase_alignment (ctxt);