/* gcc/tree-vect-slp.c  */
/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "vec.h"
#include "hashtab.h"
#include "hash-set.h"
#include "machmode.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location on success and UNKNOWN_LOCATION
   otherwise.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;

  return node;
}
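/* For illustration: given a group of isomorphic scalar stmts such as

     a0 = b0 + c0;
     a1 = b1 + c1;

   the node created above holds both stmts in SLP_TREE_SCALAR_STMTS and
   reserves one child slot per operand (here two, for the b* and c*
   defs), which the recursion in vect_build_slp_tree fills in later.  */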
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
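/* For example, for the interleaved loads

     x0 = a[4*i];      <-- FIRST_STMT, place 0
     x1 = a[4*i + 1];  <-- place 1
     x2 = a[4*i + 2];  <-- place 2
     x3 = a[4*i + 3];  <-- place 3

   the function walks the GROUP_NEXT_ELEMENT links starting from
   FIRST_STMT and returns the zero-based position of STMT.  */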
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  Return -1 on a fatal error, 1 if
   the error could be corrected by swapping operands of the operation, and 0
   if everything is OK.  */

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return -1;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
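/* For example, when checking the second stmt of the group

     a0 = b0 + c0;
     a1 = c1 + b1;

   the defs of operand 0 do not match those of the first stmt, but since
   PLUS_EXPR is commutative the code above retries with the operands
   swapped ("goto again") and, on success, commits the swap with
   swap_ssa_operands before returning 0.  */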
/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are not of unsupported operation types.  Return
   true if so; otherwise return false and indicate in *MATCHES
   which stmts are not isomorphic to the first one.  If MATCHES[0]
   is false then this indicates that the comparison could not be
   carried out or that the stmts will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches)
{
  unsigned int i;
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (is_gimple_call (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (stmt)
              || gimple_call_tail_p (stmt)
              || gimple_call_noreturn_p (stmt)
              || !gimple_call_nothrow_p (stmt)
              || gimple_call_chain (stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ???  We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                   && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  return true;
}
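/* For example, the group

     a[0] = b[0] + c[0];
     a[1] = b[1] - c[1];

   fails the checks above with "different operation in stmt" (PLUS_EXPR
   vs. MINUS_EXPR), while a group where every stmt uses the same
   operation and compatible types passes and can become an SLP node.  */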
/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation, or are of unsupported types of operation.  Otherwise,
   return TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_npermutes = 0, this_tree_size = 0;
  gimple stmt;

  if (!matches)
    matches = XALLOCAVEC (bool, group_size);
  if (!npermutes)
    npermutes = &this_npermutes;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches))
    return false;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                           stmt, (i == 0), &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      bool *matches = XALLOCAVEC (bool, group_size);
      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commuted try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          /* Do so only if the number of not successful permutes was no more
             than a cut-off as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (unsigned j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again ... */
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   matches, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}
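/* For illustration, for the grouped stores

     a[0] = b[0] * c[0];
     a[1] = b[1] * c[1];

   the recursion above yields a tree whose root node holds the two
   stores, with one child holding the two multiplications, which in
   turn has two leaf children: the load group from b[] and the load
   group from c[] (the leaves are pushed onto LOADS and terminate the
   recursion).  */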
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
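/* For example, with PERMUTATION {1, 0} a node holding stmts {s0, s1}
   ends up holding {s1, s0}: each stmt is stored at the position the
   permutation assigns it (tmp_stmts[permutation[i]] = stmt).  */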
/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check that for every node in the instance the loads
         form a subchain.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                return false;
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ???  This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;

  return true;
}
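/* For example, with GROUP_SIZE == 2 the "0..01..1" shape above accepts
   the permutation {0, 0} in one load node and {1, 1} in the other
   (each node uses a single element of the interleaving chain for the
   whole group), while a genuine shuffle such as {1, 0} within one node
   is rejected here for grouped stores.  */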
/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}
/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}
/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                             instance, child, prologue_cost_vec,
                             ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ???  Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                      stmt_info, 0, vect_body);

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt)
          && (dt == vect_constant_def || dt == vect_external_def))
        record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                          stmt_info, 0, vect_prologue);
    }
}
/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}
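/* For example, with a group of 8 scalar stmts and 4-element vectors
   (GROUP_SIZE == 8, NUNITS == 4), least_common_multiple (4, 8) / 4 == 2,
   so two vector stmts are costed per node; with GROUP_SIZE == 4 and
   NUNITS == 8 the result is 1.  */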
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, NULL, NULL, NULL,
                           max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }

      /* Compute the costs of this SLP instance.  */
      vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                             new_instance, TYPE_VECTOR_SUBPARTS (vectype));

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}
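/* For example, a group of 2 stores with 4-element vectors gives
   least_common_multiple (4, 2) / 2 == 2, i.e. two copies of the group
   are needed to fill one vector; in basic-block SLP this case is
   rejected above since no unrolling is possible there.  */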
/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                  unsigned max_tree_size)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                   max_tree_size))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.\n");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                       max_tree_size))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
                                    max_tree_size))
    ok = true;

  return true;
}
/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}
/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (node);
  gimple stmt = stmts[0];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  slp_tree child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = NULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  basic_block bb = NULL;

  if (!node)
    return;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
  else
    bb = BB_VINFO_BB (bb_vinfo);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
        && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
        if (gimple_bb (use_stmt)
            && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                || bb == gimple_bb (use_stmt))
            && (stmt_vinfo = vinfo_for_stmt (use_stmt))
            && !STMT_SLP_TYPE (stmt_vinfo)
            && (STMT_VINFO_RELEVANT (stmt_vinfo)
                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo))
                || (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)
                    && STMT_VINFO_RELATED_STMT (stmt_vinfo)
                    && !STMT_SLP_TYPE (vinfo_for_stmt
                                         (STMT_VINFO_RELATED_STMT
                                            (stmt_vinfo)))))
            && !(gimple_code (use_stmt) == GIMPLE_PHI
                 && STMT_VINFO_DEF_TYPE (stmt_vinfo)
                      == vect_reduction_def))
          vect_mark_slp_stmts (node, hybrid, i);

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_detect_hybrid_slp_stmts (child);
}
/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}
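/* For example, if the result of an SLP-ed stmt is also consumed by a
   relevant stmt outside any SLP instance in the same loop, the def must
   additionally be available to loop-based vectorization, so the stmt is
   re-marked HYBRID by the walk above.  */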
/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}
/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  vect_destroy_datarefs (NULL, bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}
/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (bb_vinfo, child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}
1994 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
1995 operations are supported. */
1997 static bool
1998 vect_slp_analyze_operations (bb_vec_info bb_vinfo)
2000 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2001 slp_instance instance;
2002 int i;
2004 for (i = 0; slp_instances.iterate (i, &instance); )
2006 if (!vect_slp_analyze_node_operations (bb_vinfo,
2007 SLP_INSTANCE_TREE (instance)))
2009 vect_free_slp_instance (instance);
2010 slp_instances.ordered_remove (i);
2012 else
2013 i++;
2016 if (!slp_instances.length ())
2017 return false;
2019 return true;
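/* A minimal sketch of the removal idiom used above (hypothetical
   standalone example): the index is advanced only when no element is
   removed, because ordered_remove shifts the remaining elements down,
   so the next candidate already lives at index I:

     for (i = 0; v.iterate (i, &x); )
       if (reject (x))
         v.ordered_remove (i);
       else
         i++;  */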
 2023 /* Compute the scalar cost of the SLP node NODE and its children
 2024 and return it. Do not account defs that are marked in LIFE, and
 2025 update LIFE according to uses of NODE. */
2027 static unsigned
2028 vect_bb_slp_scalar_cost (basic_block bb,
2029 slp_tree node, vec<bool, va_heap> *life)
2031 unsigned scalar_cost = 0;
2032 unsigned i;
2033 gimple stmt;
2034 slp_tree child;
2036 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2038 unsigned stmt_cost;
2039 ssa_op_iter op_iter;
2040 def_operand_p def_p;
2041 stmt_vec_info stmt_info;
2043 if ((*life)[i])
2044 continue;
 2046 /* If there is a non-vectorized use of the defs then the scalar
 2047 stmt is kept live, in which case we do not account it or any
 2048 required defs in the SLP children in the scalar cost. This
 2049 way we make the vectorization more costly when compared to
 2050 the scalar cost. */
2051 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2053 imm_use_iterator use_iter;
2054 gimple use_stmt;
2055 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2056 if (!is_gimple_debug (use_stmt)
2057 && (gimple_code (use_stmt) == GIMPLE_PHI
2058 || gimple_bb (use_stmt) != bb
2059 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2061 (*life)[i] = true;
2062 BREAK_FROM_IMM_USE_STMT (use_iter);
2065 if ((*life)[i])
2066 continue;
2068 stmt_info = vinfo_for_stmt (stmt);
2069 if (STMT_VINFO_DATA_REF (stmt_info))
2071 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2072 stmt_cost = vect_get_stmt_cost (scalar_load);
2073 else
2074 stmt_cost = vect_get_stmt_cost (scalar_store);
2076 else
2077 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2079 scalar_cost += stmt_cost;
2082 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2083 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2085 return scalar_cost;
2088 /* Check if vectorization of the basic block is profitable. */
2090 static bool
2091 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2093 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2094 slp_instance instance;
2095 int i, j;
2096 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2097 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2098 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2099 stmt_vec_info stmt_info = NULL;
2100 stmt_vector_for_cost body_cost_vec;
2101 stmt_info_for_cost *ci;
2103 /* Calculate vector costs. */
2104 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2106 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2108 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2110 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2111 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2112 stmt_info, ci->misalign, vect_body);
2116 /* Calculate scalar cost. */
2117 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2119 auto_vec<bool, 20> life;
2120 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2121 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2122 SLP_INSTANCE_TREE (instance),
2123 &life);
2126 /* Complete the target-specific cost calculation. */
2127 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2128 &vec_inside_cost, &vec_epilogue_cost);
2130 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2132 if (dump_enabled_p ())
2134 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2135 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2136 vec_inside_cost);
2137 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2138 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2139 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
 2142 /* Vectorization is profitable if its cost is less than the cost of the
 2143 scalar version. */
2144 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2145 return false;
2147 return true;
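/* Worked example with hypothetical numbers: for vec_prologue_cost == 2,
   vec_epilogue_cost == 1 and vec_inside_cost == 4, vec_outside_cost is
   2 + 1 == 3 and the test above reads 3 + 4 >= scalar_cost, so the block
   is only considered profitable when its scalar cost is at least 8.  */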
2150 /* Check if the basic block can be vectorized. */
2152 static bb_vec_info
2153 vect_slp_analyze_bb_1 (basic_block bb)
2155 bb_vec_info bb_vinfo;
2156 vec<slp_instance> slp_instances;
2157 slp_instance instance;
2158 int i;
2159 int min_vf = 2;
2160 unsigned n_stmts = 0;
2162 bb_vinfo = new_bb_vec_info (bb);
2163 if (!bb_vinfo)
2164 return NULL;
2166 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2168 if (dump_enabled_p ())
2169 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2170 "not vectorized: unhandled data-ref in basic "
2171 "block.\n");
2173 destroy_bb_vec_info (bb_vinfo);
2174 return NULL;
2177 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2179 if (dump_enabled_p ())
2180 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2181 "not vectorized: not enough data-refs in "
2182 "basic block.\n");
2184 destroy_bb_vec_info (bb_vinfo);
2185 return NULL;
2188 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2190 if (dump_enabled_p ())
2191 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2192 "not vectorized: unhandled data access in "
2193 "basic block.\n");
2195 destroy_bb_vec_info (bb_vinfo);
2196 return NULL;
2199 vect_pattern_recog (NULL, bb_vinfo);
2201 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2203 if (dump_enabled_p ())
2204 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2205 "not vectorized: bad data alignment in basic "
2206 "block.\n");
2208 destroy_bb_vec_info (bb_vinfo);
2209 return NULL;
2212 /* Check the SLP opportunities in the basic block, analyze and build SLP
2213 trees. */
2214 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2216 if (dump_enabled_p ())
2217 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2218 "not vectorized: failed to find SLP opportunities "
2219 "in basic block.\n");
2221 destroy_bb_vec_info (bb_vinfo);
2222 return NULL;
2225 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2227 /* Mark all the statements that we want to vectorize as pure SLP and
2228 relevant. */
2229 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2231 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2232 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2235 /* Mark all the statements that we do not want to vectorize. */
2236 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2237 !gsi_end_p (gsi); gsi_next (&gsi))
2239 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2240 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2241 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2244 /* Analyze dependences. At this point all stmts not participating in
2245 vectorization have to be marked. Dependence analysis assumes
2246 that we either vectorize all SLP instances or none at all. */
2247 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2249 if (dump_enabled_p ())
2250 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2251 "not vectorized: unhandled data dependence "
2252 "in basic block.\n");
2254 destroy_bb_vec_info (bb_vinfo);
2255 return NULL;
2258 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2260 if (dump_enabled_p ())
2261 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2262 "not vectorized: unsupported alignment in basic "
2263 "block.\n");
2264 destroy_bb_vec_info (bb_vinfo);
2265 return NULL;
2268 if (!vect_slp_analyze_operations (bb_vinfo))
2270 if (dump_enabled_p ())
2271 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2272 "not vectorized: bad operation in basic block.\n");
2274 destroy_bb_vec_info (bb_vinfo);
2275 return NULL;
2278 /* Cost model: check if the vectorization is worthwhile. */
2279 if (!unlimited_cost_model (NULL)
2280 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2282 if (dump_enabled_p ())
2283 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2284 "not vectorized: vectorization is not "
2285 "profitable.\n");
2287 destroy_bb_vec_info (bb_vinfo);
2288 return NULL;
2291 if (dump_enabled_p ())
2292 dump_printf_loc (MSG_NOTE, vect_location,
2293 "Basic block will be vectorized using SLP\n");
2295 return bb_vinfo;
2299 bb_vec_info
2300 vect_slp_analyze_bb (basic_block bb)
2302 bb_vec_info bb_vinfo;
2303 int insns = 0;
2304 gimple_stmt_iterator gsi;
2305 unsigned int vector_sizes;
2307 if (dump_enabled_p ())
 2308 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_slp_analyze_bb ===\n");
2310 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2312 gimple stmt = gsi_stmt (gsi);
2313 if (!is_gimple_debug (stmt)
2314 && !gimple_nop_p (stmt)
2315 && gimple_code (stmt) != GIMPLE_LABEL)
2316 insns++;
2319 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2321 if (dump_enabled_p ())
2322 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2323 "not vectorized: too many instructions in "
2324 "basic block.\n");
2326 return NULL;
 2330 /* Autodetect the first vector size we try. */
2330 current_vector_size = 0;
2331 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2333 while (1)
2335 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2336 if (bb_vinfo)
2337 return bb_vinfo;
2339 destroy_bb_vec_info (bb_vinfo);
2341 vector_sizes &= ~current_vector_size;
2342 if (vector_sizes == 0
2343 || current_vector_size == 0)
2344 return NULL;
 2346 /* Try the largest remaining vector size. */
2347 current_vector_size = 1 << floor_log2 (vector_sizes);
2348 if (dump_enabled_p ())
2349 dump_printf_loc (MSG_NOTE, vect_location,
2350 "***** Re-trying analysis with "
2351 "vector size %d\n", current_vector_size);
 2356 /* SLP costs are calculated according to the SLP instance unrolling factor
 2357 (i.e., the number of created vector stmts depends on the unrolling factor).
 2358 However, the actual number of vector stmts for every SLP node depends on
 2359 VF, which is set later in vect_analyze_operations (). Hence, SLP costs
 2360 should be updated. In this function we assume that the inside costs
 2361 calculated in vect_model_xxx_cost are linear in ncopies. */
2363 void
2364 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2366 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2367 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2368 slp_instance instance;
2369 stmt_vector_for_cost body_cost_vec;
2370 stmt_info_for_cost *si;
2371 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2373 if (dump_enabled_p ())
2374 dump_printf_loc (MSG_NOTE, vect_location,
2375 "=== vect_update_slp_costs_according_to_vf ===\n");
2377 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2379 /* We assume that costs are linear in ncopies. */
2380 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2382 /* Record the instance's instructions in the target cost model.
2383 This was delayed until here because the count of instructions
2384 isn't known beforehand. */
2385 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2387 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2388 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2389 vinfo_for_stmt (si->stmt), si->misalign,
2390 vect_body);
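/* Worked example with hypothetical numbers: for VF == 8 and an instance
   with SLP_INSTANCE_UNROLLING_FACTOR == 2, ncopies == 8 / 2 == 4, so a
   body-cost entry recorded with count 3 is added to the target cost
   model as 3 * 4 == 12, relying on the linearity assumption above.  */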
2395 /* For constant and loop invariant defs of SLP_NODE this function returns
2396 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2397 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2398 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2399 REDUC_INDEX is the index of the reduction operand in the statements, unless
2400 it is -1. */
2402 static void
2403 vect_get_constant_vectors (tree op, slp_tree slp_node,
2404 vec<tree> *vec_oprnds,
2405 unsigned int op_num, unsigned int number_of_vectors,
2406 int reduc_index)
2408 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2409 gimple stmt = stmts[0];
2410 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2411 unsigned nunits;
2412 tree vec_cst;
2413 tree *elts;
2414 unsigned j, number_of_places_left_in_vector;
2415 tree vector_type;
2416 tree vop;
2417 int group_size = stmts.length ();
2418 unsigned int vec_num, i;
2419 unsigned number_of_copies = 1;
2420 vec<tree> voprnds;
2421 voprnds.create (number_of_vectors);
2422 bool constant_p, is_store;
2423 tree neutral_op = NULL;
2424 enum tree_code code = gimple_expr_code (stmt);
2425 gimple def_stmt;
2426 struct loop *loop;
2427 gimple_seq ctor_seq = NULL;
2429 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2430 && reduc_index != -1)
2432 op_num = reduc_index - 1;
2433 op = gimple_op (stmt, reduc_index);
2434 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2435 we need either neutral operands or the original operands. See
2436 get_initial_def_for_reduction() for details. */
2437 switch (code)
2439 case WIDEN_SUM_EXPR:
2440 case DOT_PROD_EXPR:
2441 case PLUS_EXPR:
2442 case MINUS_EXPR:
2443 case BIT_IOR_EXPR:
2444 case BIT_XOR_EXPR:
2445 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2446 neutral_op = build_real (TREE_TYPE (op), dconst0);
2447 else
2448 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2450 break;
2452 case MULT_EXPR:
2453 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2454 neutral_op = build_real (TREE_TYPE (op), dconst1);
2455 else
2456 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2458 break;
2460 case BIT_AND_EXPR:
2461 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2462 break;
 2464 /* For MIN/MAX we don't have an easy neutral operand but
 2465 the initial values can be used here just fine. Only for
 2466 a reduction chain do we have to force a neutral element. */
2467 case MAX_EXPR:
2468 case MIN_EXPR:
2469 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2470 neutral_op = NULL;
2471 else
2473 def_stmt = SSA_NAME_DEF_STMT (op);
2474 loop = (gimple_bb (stmt))->loop_father;
2475 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2476 loop_preheader_edge (loop));
2478 break;
2480 default:
2481 neutral_op = NULL;
2485 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2487 is_store = true;
2488 op = gimple_assign_rhs1 (stmt);
2490 else
2491 is_store = false;
2493 gcc_assert (op);
2495 if (CONSTANT_CLASS_P (op))
2496 constant_p = true;
2497 else
2498 constant_p = false;
2500 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2501 gcc_assert (vector_type);
2502 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2504 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2505 created vectors. It is greater than 1 if unrolling is performed.
2507 For example, we have two scalar operands, s1 and s2 (e.g., group of
2508 strided accesses of size two), while NUNITS is four (i.e., four scalars
2509 of this type can be packed in a vector). The output vector will contain
2510 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2511 will be 2).
2513 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2514 containing the operands.
2516 For example, NUNITS is four as before, and the group size is 8
2517 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2518 {s5, s6, s7, s8}. */
2520 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
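/* Worked instances of this formula, using the numbers from the comment
   above: NUNITS == 4 and GROUP_SIZE == 2 give lcm (4, 2) / 2 == 2
   copies, i.e. {s1, s2, s1, s2}; NUNITS == 4 and GROUP_SIZE == 8 give
   lcm (4, 8) / 8 == 1 copy spread over the two vectors shown above.  */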
2522 number_of_places_left_in_vector = nunits;
2523 elts = XALLOCAVEC (tree, nunits);
2524 for (j = 0; j < number_of_copies; j++)
2526 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2528 if (is_store)
2529 op = gimple_assign_rhs1 (stmt);
2530 else
2532 switch (code)
2534 case COND_EXPR:
2535 if (op_num == 0 || op_num == 1)
2537 tree cond = gimple_assign_rhs1 (stmt);
2538 op = TREE_OPERAND (cond, op_num);
2540 else
2542 if (op_num == 2)
2543 op = gimple_assign_rhs2 (stmt);
2544 else
2545 op = gimple_assign_rhs3 (stmt);
2547 break;
2549 case CALL_EXPR:
2550 op = gimple_call_arg (stmt, op_num);
2551 break;
2553 case LSHIFT_EXPR:
2554 case RSHIFT_EXPR:
2555 case LROTATE_EXPR:
2556 case RROTATE_EXPR:
2557 op = gimple_op (stmt, op_num + 1);
 2558 /* Unlike the other binary operators, shifts/rotates take an
 2559 int shift count instead of an operand of the same type as
 2560 the lhs, so make sure the scalar is the right type if
 2561 we are dealing with vectors of
 2562 long long/long/short/char. */
2563 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2564 op = fold_convert (TREE_TYPE (vector_type), op);
2565 break;
2567 default:
2568 op = gimple_op (stmt, op_num + 1);
2569 break;
2573 if (reduc_index != -1)
2575 loop = (gimple_bb (stmt))->loop_father;
2576 def_stmt = SSA_NAME_DEF_STMT (op);
2578 gcc_assert (loop);
 2580 /* Get the def before the loop. In a reduction chain we have only
 2581 one initial value. */
2582 if ((j != (number_of_copies - 1)
2583 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2584 && i != 0))
2585 && neutral_op)
2586 op = neutral_op;
2587 else
2588 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2589 loop_preheader_edge (loop));
2592 /* Create 'vect_ = {op0,op1,...,opn}'. */
2593 number_of_places_left_in_vector--;
2594 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2596 if (CONSTANT_CLASS_P (op))
2598 op = fold_unary (VIEW_CONVERT_EXPR,
2599 TREE_TYPE (vector_type), op);
2600 gcc_assert (op && CONSTANT_CLASS_P (op));
2602 else
2604 tree new_temp
2605 = make_ssa_name (TREE_TYPE (vector_type), NULL);
2606 gimple init_stmt;
2607 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
2608 op);
2609 init_stmt
2610 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR,
2611 new_temp, op, NULL_TREE);
2612 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2613 op = new_temp;
2616 elts[number_of_places_left_in_vector] = op;
2617 if (!CONSTANT_CLASS_P (op))
2618 constant_p = false;
2620 if (number_of_places_left_in_vector == 0)
2622 number_of_places_left_in_vector = nunits;
2624 if (constant_p)
2625 vec_cst = build_vector (vector_type, elts);
2626 else
2628 vec<constructor_elt, va_gc> *v;
2629 unsigned k;
2630 vec_alloc (v, nunits);
2631 for (k = 0; k < nunits; ++k)
2632 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2633 vec_cst = build_constructor (vector_type, v);
2635 voprnds.quick_push (vect_init_vector (stmt, vec_cst,
2636 vector_type, NULL));
2637 if (ctor_seq != NULL)
2639 gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
2640 gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
2641 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2642 GSI_SAME_STMT);
2643 ctor_seq = NULL;
 2649 /* Since the vectors are created in reverse order, we have to invert
 2650 them. */
2651 vec_num = voprnds.length ();
2652 for (j = vec_num; j != 0; j--)
2654 vop = voprnds[j - 1];
2655 vec_oprnds->quick_push (vop);
2658 voprnds.release ();
 2660 /* If VF is greater than the unrolling factor needed for the SLP
 2661 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
 2662 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
 2663 to replicate the vectors. */
2664 while (number_of_vectors > vec_oprnds->length ())
2666 tree neutral_vec = NULL;
2668 if (neutral_op)
2670 if (!neutral_vec)
2671 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2673 vec_oprnds->quick_push (neutral_vec);
2675 else
2677 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2678 vec_oprnds->quick_push (vop);
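/* Replication example with hypothetical numbers: if NUMBER_OF_VECTORS
   == 4 but only two vectors were created, the loop above either pushes
   two neutral vectors (e.g. {0, 0, 0, 0} when NEUTRAL_OP is the zero of
   a PLUS_EXPR reduction) or pushes the two existing vectors again.  */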
2684 /* Get vectorized definitions from SLP_NODE that contains corresponding
2685 vectorized def-stmts. */
2687 static void
2688 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2690 tree vec_oprnd;
2691 gimple vec_def_stmt;
2692 unsigned int i;
2694 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2696 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2698 gcc_assert (vec_def_stmt);
2699 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2700 vec_oprnds->quick_push (vec_oprnd);
2705 /* Get vectorized definitions for SLP_NODE.
2706 If the scalar definitions are loop invariants or constants, collect them and
2707 call vect_get_constant_vectors() to create vector stmts.
 2708 Otherwise, the def-stmts must already be vectorized and the vectorized stmts
2709 must be stored in the corresponding child of SLP_NODE, and we call
2710 vect_get_slp_vect_defs () to retrieve them. */
2712 void
2713 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2714 vec<vec<tree> > *vec_oprnds, int reduc_index)
2716 gimple first_stmt;
2717 int number_of_vects = 0, i;
2718 unsigned int child_index = 0;
2719 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2720 slp_tree child = NULL;
2721 vec<tree> vec_defs;
2722 tree oprnd;
2723 bool vectorized_defs;
2725 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2726 FOR_EACH_VEC_ELT (ops, i, oprnd)
 2728 /* For each operand we check if it has vectorized definitions in a child
 2729 node or whether we need to create them (for invariants and constants). We
 2730 check if the LHS of the first stmt of the next child matches OPRND.
 2731 If it does, we found the correct child. Otherwise, we call
 2732 vect_get_constant_vectors () and do not advance CHILD_INDEX, so that
 2733 this child node is checked again for the next operand. */
2734 vectorized_defs = false;
2735 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2737 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2739 /* We have to check both pattern and original def, if available. */
2740 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2741 gimple related = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2743 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2744 || (related
2745 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2747 /* The number of vector defs is determined by the number of
2748 vector statements in the node from which we get those
2749 statements. */
2750 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2751 vectorized_defs = true;
2752 child_index++;
2756 if (!vectorized_defs)
2758 if (i == 0)
2760 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
2761 /* Number of vector stmts was calculated according to LHS in
2762 vect_schedule_slp_instance (), fix it by replacing LHS with
2763 RHS, if necessary. See vect_get_smallest_scalar_type () for
2764 details. */
2765 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
2766 &rhs_size_unit);
2767 if (rhs_size_unit != lhs_size_unit)
2769 number_of_vects *= rhs_size_unit;
2770 number_of_vects /= lhs_size_unit;
2775 /* Allocate memory for vectorized defs. */
2776 vec_defs = vNULL;
2777 vec_defs.create (number_of_vects);
2779 /* For reduction defs we call vect_get_constant_vectors (), since we are
2780 looking for initial loop invariant values. */
2781 if (vectorized_defs && reduc_index == -1)
2782 /* The defs are already vectorized. */
2783 vect_get_slp_vect_defs (child, &vec_defs);
2784 else
2785 /* Build vectors from scalar defs. */
2786 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
2787 number_of_vects, reduc_index);
2789 vec_oprnds->quick_push (vec_defs);
2791 /* For reductions, we only need initial values. */
2792 if (reduc_index != -1)
2793 return;
 2798 /* Create NCOPIES permutation statements using the mask MASK (by
 2799 building a vector of type MASK_TYPE from it) and two input vectors placed in
 2800 DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy, and
 2801 shifting by STRIDE elements of DR_CHAIN for every further copy.
 2802 (STRIDE is the number of vectorized stmts for NODE divided by the number of
 2803 copies.)
 2804 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE at
 2805 which the created stmts must be inserted. */
2807 static inline void
2808 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2809 tree mask, int first_vec_indx, int second_vec_indx,
2810 gimple_stmt_iterator *gsi, slp_tree node,
2811 tree vectype, vec<tree> dr_chain,
2812 int ncopies, int vect_stmts_counter)
2814 tree perm_dest;
2815 gimple perm_stmt = NULL;
2816 stmt_vec_info next_stmt_info;
2817 int i, stride;
2818 tree first_vec, second_vec, data_ref;
2820 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
2822 /* Initialize the vect stmts of NODE to properly insert the generated
2823 stmts later. */
2824 for (i = SLP_TREE_VEC_STMTS (node).length ();
2825 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2826 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
2828 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2829 for (i = 0; i < ncopies; i++)
2831 first_vec = dr_chain[first_vec_indx];
2832 second_vec = dr_chain[second_vec_indx];
2834 /* Generate the permute statement. */
2835 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
2836 first_vec, second_vec, mask);
2837 data_ref = make_ssa_name (perm_dest, perm_stmt);
2838 gimple_set_lhs (perm_stmt, data_ref);
2839 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2841 /* Store the vector statement in NODE. */
2842 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
2844 first_vec_indx += stride;
2845 second_vec_indx += stride;
2848 /* Mark the scalar stmt as vectorized. */
2849 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
2850 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
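/* Stride example with hypothetical numbers: for a NODE with 8 vector
   stmts and ncopies == 2, stride == 4; copy 0 of the permutation is
   stored at index VECT_STMTS_COUNTER and copy 1 at index
   4 + VECT_STMTS_COUNTER, with both input indexes advanced by 4.  */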
 2854 /* Given FIRST_MASK_ELEMENT, the mask element in element representation,
 2855 return in CURRENT_MASK_ELEMENT its equivalent in target-specific
 2856 representation. Check that the mask is valid and return FALSE if not.
 2857 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
 2858 the next vector, i.e., the current first vector is not needed. */
2860 static bool
2861 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
2862 int mask_nunits, bool only_one_vec, int index,
2863 unsigned char *mask, int *current_mask_element,
2864 bool *need_next_vector, int *number_of_mask_fixes,
2865 bool *mask_fixed, bool *needs_first_vector)
2867 int i;
2869 /* Convert to target specific representation. */
2870 *current_mask_element = first_mask_element + m;
2871 /* Adjust the value in case it's a mask for second and third vectors. */
2872 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
2874 if (*current_mask_element < mask_nunits)
2875 *needs_first_vector = true;
2877 /* We have only one input vector to permute but the mask accesses values in
2878 the next vector as well. */
2879 if (only_one_vec && *current_mask_element >= mask_nunits)
2881 if (dump_enabled_p ())
2883 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2884 "permutation requires at least two vectors ");
2885 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2886 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2889 return false;
2892 /* The mask requires the next vector. */
2893 while (*current_mask_element >= mask_nunits * 2)
2895 if (*needs_first_vector || *mask_fixed)
2897 /* We either need the first vector too or have already moved to the
2898 next vector. In both cases, this permutation needs three
2899 vectors. */
2900 if (dump_enabled_p ())
2902 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2903 "permutation requires at "
2904 "least three vectors ");
2905 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2906 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2909 return false;
 2912 /* We move to the next vector, dropping the first one and working with
 2913 the second and the third; we need to adjust the values of the mask
 2914 accordingly. */
2915 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
2917 for (i = 0; i < index; i++)
2918 mask[i] -= mask_nunits * *number_of_mask_fixes;
2920 (*number_of_mask_fixes)++;
2921 *mask_fixed = true;
2924 *need_next_vector = *mask_fixed;
2926 /* This was the last element of this mask. Start a new one. */
2927 if (index == mask_nunits - 1)
2929 *number_of_mask_fixes = 1;
2930 *mask_fixed = false;
2931 *needs_first_vector = false;
2934 return true;
2938 /* Generate vector permute statements from a list of loads in DR_CHAIN.
2939 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
2940 permute statements for the SLP node NODE of the SLP instance
2941 SLP_NODE_INSTANCE. */
2943 bool
2944 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
2945 gimple_stmt_iterator *gsi, int vf,
2946 slp_instance slp_node_instance, bool analyze_only)
2948 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
2949 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2950 tree mask_element_type = NULL_TREE, mask_type;
2951 int i, j, k, nunits, vec_index = 0, scalar_index;
2952 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2953 gimple next_scalar_stmt;
2954 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
2955 int first_mask_element;
2956 int index, unroll_factor, current_mask_element, ncopies;
2957 unsigned char *mask;
2958 bool only_one_vec = false, need_next_vector = false;
2959 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
2960 int number_of_mask_fixes = 1;
2961 bool mask_fixed = false;
2962 bool needs_first_vector = false;
2963 machine_mode mode;
2965 mode = TYPE_MODE (vectype);
2967 if (!can_vec_perm_p (mode, false, NULL))
2969 if (dump_enabled_p ())
2971 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2972 "no vect permute for ");
2973 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2974 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2976 return false;
2979 /* The generic VEC_PERM_EXPR code always uses an integral type of the
2980 same size as the vector element being permuted. */
2981 mask_element_type = lang_hooks.types.type_for_mode
2982 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
2983 mask_type = get_vectype_for_scalar_type (mask_element_type);
2984 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2985 mask = XALLOCAVEC (unsigned char, nunits);
2986 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2988 /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
2989 unrolling factor. */
2990 orig_vec_stmts_num = group_size *
2991 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
2992 if (orig_vec_stmts_num == 1)
2993 only_one_vec = true;
 2995 /* The number of copies is determined by the final vectorization factor
 2996 relative to the SLP_NODE_INSTANCE unrolling factor. */
2997 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2999 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3000 return false;
 3002 /* Generate permutation masks for every NODE. The number of masks for each
 3003 NODE is equal to GROUP_SIZE.
 3004 E.g., we have a group of three nodes with three loads from the same
 3005 location in each node, and the vector size is 4. I.e., we have an
3006 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3007 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3008 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
 3011 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
 3012 The last mask is illegal since we assume only two input operands for a
 3013 permute operation, so mask element values cannot exceed 2 * NUNITS - 1.
 3014 Hence, the last mask must be converted into {2,5,5,5}.
3015 For the first two permutations we need the first and the second input
3016 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3017 we need the second and the third vectors: {b1,c1,a2,b2} and
3018 {c2,a3,b3,c3}. */
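/* The masks above follow from the index computation in the loop below:
   for the a's the load-permutation index is 0 and GROUP_SIZE is 3, so
   first_mask_element == 0 + j * 3 contributes three identical entries
   per unrolled copy j: 0,0,0, 3,3,3, 6,6,6, 9,9,9.  Packed four at a
   time this yields {0,0,0,3} {3,3,6,6} {6,9,9,9}, the last of which is
   remapped to {2,5,5,5} once the first vector is dropped.  */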
3021 scalar_index = 0;
3022 index = 0;
3023 vect_stmts_counter = 0;
3024 vec_index = 0;
3025 first_vec_index = vec_index++;
3026 if (only_one_vec)
3027 second_vec_index = first_vec_index;
3028 else
3029 second_vec_index = vec_index++;
3031 for (j = 0; j < unroll_factor; j++)
3033 for (k = 0; k < group_size; k++)
3035 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3036 first_mask_element = i + j * group_size;
3037 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3038 nunits, only_one_vec, index,
3039 mask, &current_mask_element,
3040 &need_next_vector,
3041 &number_of_mask_fixes, &mask_fixed,
3042 &needs_first_vector))
3043 return false;
3044 gcc_assert (current_mask_element < 2 * nunits);
3045 mask[index++] = current_mask_element;
3047 if (index == nunits)
3049 index = 0;
3050 if (!can_vec_perm_p (mode, false, mask))
3052 if (dump_enabled_p ())
3054 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3055 vect_location,
3056 "unsupported vect permute { ");
3057 for (i = 0; i < nunits; ++i)
3058 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3059 mask[i]);
3060 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3062 return false;
3065 if (!analyze_only)
3067 int l;
3068 tree mask_vec, *mask_elts;
3069 mask_elts = XALLOCAVEC (tree, nunits);
3070 for (l = 0; l < nunits; ++l)
3071 mask_elts[l] = build_int_cst (mask_element_type,
3072 mask[l]);
3073 mask_vec = build_vector (mask_type, mask_elts);
3075 if (need_next_vector)
3077 first_vec_index = second_vec_index;
3078 second_vec_index = vec_index;
3081 next_scalar_stmt
3082 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3084 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3085 mask_vec, first_vec_index, second_vec_index,
3086 gsi, node, vectype, dr_chain,
3087 ncopies, vect_stmts_counter++);
3094 return true;
3099 /* Vectorize SLP instance tree in postorder. */
3101 static bool
3102 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3103 unsigned int vectorization_factor)
3105 gimple stmt;
3106 bool grouped_store, is_store;
3107 gimple_stmt_iterator si;
3108 stmt_vec_info stmt_info;
3109 unsigned int vec_stmts_size, nunits, group_size;
3110 tree vectype;
3111 int i;
3112 slp_tree child;
3114 if (!node)
3115 return false;
3117 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3118 vect_schedule_slp_instance (child, instance, vectorization_factor);
3120 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3121 stmt_info = vinfo_for_stmt (stmt);
3123 /* VECTYPE is the type of the destination. */
3124 vectype = STMT_VINFO_VECTYPE (stmt_info);
3125 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3126 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
 3128 /* For each SLP instance calculate the number of vector stmts to be created
 3129 for the scalar stmts in each node of the SLP tree. The number of vector
 3130 elements in one vector iteration is the number of scalar elements in
 3131 one scalar iteration (GROUP_SIZE) multiplied by VF and divided by the
 3132 vector size. */
3133 vec_stmts_size = (vectorization_factor * group_size) / nunits;
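/* Worked example with hypothetical numbers: for VECTORIZATION_FACTOR == 4,
   GROUP_SIZE == 2 and NUNITS == 4, vec_stmts_size == (4 * 2) / 4 == 2,
   i.e. the eight scalar results produced across the unrolled iterations
   fit in two vector stmts.  */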
3135 if (!SLP_TREE_VEC_STMTS (node).exists ())
3137 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3138 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3141 if (dump_enabled_p ())
3143 dump_printf_loc (MSG_NOTE,vect_location,
3144 "------>vectorizing SLP node starting from: ");
3145 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3146 dump_printf (MSG_NOTE, "\n");
3149 /* Loads should be inserted before the first load. */
3150 if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
3151 && STMT_VINFO_GROUPED_ACCESS (stmt_info)
3152 && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
3153 && SLP_TREE_LOAD_PERMUTATION (node).exists ())
3154 si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
3155 else if (is_pattern_stmt_p (stmt_info))
3156 si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
3157 else
3158 si = gsi_for_stmt (stmt);
3160 /* Stores should be inserted just before the last store. */
3161 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
3162 && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
3164 gimple last_store = vect_find_last_store_in_slp_instance (instance);
3165 if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
3166 last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
3167 si = gsi_for_stmt (last_store);
 3170 /* Mark the first element of the reduction chain as a reduction to properly
 3171 transform the node. In the analysis phase only the last element of the
 3172 chain is marked as a reduction. */
3173 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3174 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3176 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3177 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3180 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3181 return is_store;
 3184 /* Replace the scalar calls in SLP node NODE by assignments setting their lhs
 3185 to zero. For loop vectorization this is done in vectorizable_call, but for
 3186 SLP it needs to be deferred until the end of vect_schedule_slp, because
 3187 multiple SLP instances may refer to the same scalar stmt. */
3189 static void
3190 vect_remove_slp_scalar_calls (slp_tree node)
3192 gimple stmt, new_stmt;
3193 gimple_stmt_iterator gsi;
3194 int i;
3195 slp_tree child;
3196 tree lhs;
3197 stmt_vec_info stmt_info;
3199 if (!node)
3200 return;
3202 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3203 vect_remove_slp_scalar_calls (child);
3205 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3207 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3208 continue;
3209 stmt_info = vinfo_for_stmt (stmt);
3210 if (stmt_info == NULL
3211 || is_pattern_stmt_p (stmt_info)
3212 || !PURE_SLP_STMT (stmt_info))
3213 continue;
3214 lhs = gimple_call_lhs (stmt);
3215 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3216 set_vinfo_for_stmt (new_stmt, stmt_info);
3217 set_vinfo_for_stmt (stmt, NULL);
3218 STMT_VINFO_STMT (stmt_info) = new_stmt;
3219 gsi = gsi_for_stmt (stmt);
3220 gsi_replace (&gsi, new_stmt, false);
3221 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3225 /* Generate vector code for all SLP instances in the loop/basic block. */
3227 bool
3228 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3230 vec<slp_instance> slp_instances;
3231 slp_instance instance;
3232 unsigned int i, vf;
3233 bool is_store = false;
3235 if (loop_vinfo)
3237 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3238 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3240 else
3242 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3243 vf = 1;
3246 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3248 /* Schedule the tree of INSTANCE. */
3249 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3250 instance, vf);
3251 if (dump_enabled_p ())
3252 dump_printf_loc (MSG_NOTE, vect_location,
3253 "vectorizing stmts using SLP.\n");
3256 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3258 slp_tree root = SLP_INSTANCE_TREE (instance);
3259 gimple store;
3260 unsigned int j;
3261 gimple_stmt_iterator gsi;
3263 /* Remove scalar call stmts. Do not do this for basic-block
3264 vectorization as not all uses may be vectorized.
3265 ??? Why should this be necessary? DCE should be able to
3266 remove the stmts itself.
3267 ??? For BB vectorization we can as well remove scalar
3268 stmts starting from the SLP tree root if they have no
3269 uses. */
3270 if (loop_vinfo)
3271 vect_remove_slp_scalar_calls (root);
3273 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3274 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3276 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3277 break;
3279 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3280 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3281 /* Free the attached stmt_vec_info and remove the stmt. */
3282 gsi = gsi_for_stmt (store);
3283 unlink_stmt_vdef (store);
3284 gsi_remove (&gsi, true);
3285 release_defs (store);
3286 free_stmt_vec_info (store);
3290 return is_store;
3294 /* Vectorize the basic block. */
3296 void
3297 vect_slp_transform_bb (basic_block bb)
3299 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3300 gimple_stmt_iterator si;
3302 gcc_assert (bb_vinfo);
3304 if (dump_enabled_p ())
3305 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3307 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3309 gimple stmt = gsi_stmt (si);
3310 stmt_vec_info stmt_info;
3312 if (dump_enabled_p ())
3314 dump_printf_loc (MSG_NOTE, vect_location,
3315 "------>SLPing statement: ");
3316 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3317 dump_printf (MSG_NOTE, "\n");
3320 stmt_info = vinfo_for_stmt (stmt);
3321 gcc_assert (stmt_info);
3323 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3324 if (STMT_SLP_TYPE (stmt_info))
3326 vect_schedule_slp (NULL, bb_vinfo);
3327 break;
3331 if (dump_enabled_p ())
3332 dump_printf_loc (MSG_NOTE, vect_location,
3333 "BASIC BLOCK VECTORIZED\n");
3335 destroy_bb_vec_info (bb_vinfo);