/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2014 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "tree.h"
#include "stor-layout.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"

/* Extract the location of the basic block in the source code.
   Return the basic block location on success and UNKNOWN_LOCATION
   otherwise.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}

/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}

/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}

/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;

  return node;
}

/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */

static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}

/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}

/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
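
/* As an illustration: for an interleaving chain built from the loads of
   a[0] -> a[1] -> a[2] -> a[3], with FIRST_STMT being the load of a[0],
   the loads of a[0..3] are at places 0..3 respectively, while a stmt
   whose GROUP_FIRST_ELEMENT is a different chain yields -1.  */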

/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  */

static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int op_idx = 1;
  tree compare_rhs = NULL_TREE;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        number_of_oprnds++;
    }
  else
    return false;

  for (i = 0; i < number_of_oprnds; i++)
    {
      if (compare_rhs)
        {
          oprnd = compare_rhs;
          compare_rhs = NULL_TREE;
        }
      else
        oprnd = gimple_op (stmt, op_idx++);

      oprnd_info = (*oprnds_info)[i];

      if (COMPARISON_CLASS_P (oprnd))
        {
          compare_rhs = TREE_OPERAND (oprnd, 1);
          oprnd = TREE_OPERAND (oprnd, 0);
        }

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return false;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return false;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return false;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return false;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return false;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return false;
        }
    }

  return true;
}
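
/* As an illustration: for the group { a = b + c; d = e + f; } the defs of
   operand zero are collected as { b, e } and those of operand one as
   { c, f }; the group matches only if b/e and c/f respectively agree in
   definition type and have compatible types.  */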

/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are of supported types of operation.  Return true if
   so; otherwise return false and indicate in *MATCHES which stmts are not
   isomorphic to the first one.  If MATCHES[0] is false then this indicates
   the comparison could not be carried out or the stmts will never be
   vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches)
{
  unsigned int i;
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (is_gimple_call (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (stmt)
              || gimple_call_tail_p (stmt)
              || gimple_call_noreturn_p (stmt)
              || !gimple_call_nothrow_p (stmt)
              || gimple_call_chain (stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ???  We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                        && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                       /* If the group is split up then GROUP_GAP
                          isn't correct here, nor is GROUP_FIRST_ELEMENT.  */
                       || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  return true;
}

/* Recursively build an SLP tree starting from NODE.
   Fail (and return false) if def-stmts are not isomorphic, require data
   permutation, or are of unsupported types of operation.  Otherwise,
   return true.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_npermutes = 0, this_tree_size = 0;
  gimple stmt;

  if (!matches)
    matches = XALLOCAVEC (bool, group_size);
  if (!npermutes)
    npermutes = &this_npermutes;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches))
    return false;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                        stmt, (i == 0), &oprnds_info))
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }
    }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      bool *matches = XALLOCAVEC (bool, group_size);
      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commutated try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          /* Do so only if the number of not successful permutes was
             no more than a cut-off, as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          /* Swap mismatched definition stmts.  */
          for (unsigned j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
              }
          /* And try again ... */
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   matches, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}
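
/* As an illustration of the commutation retry above: for the group
   { x0 = a0 + b0; x1 = b1 + a1 } the operand-zero child { a0, b1 } may
   fail to match while operand one is an internal def; swapping the
   mismatched lanes yields { a0, a1 } and { b0, b1 }, which is then
   retried (with at most four failed permutes per tree build).  */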

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}

/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}

/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}

/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
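
/* As an illustration: with GROUP_SIZE 3 and PERMUTATION { 2, 0, 1 } the
   stmts { s0, s1, s2 } of every node are rewritten as { s1, s2, s0 },
   since the stmt at index i moves to index PERMUTATION[i].  */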

/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check that for every node in the instance the loads
         form a subchain.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                return false;
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ???  This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}
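
/* As an illustration of the 0..01..1.. shape: with GROUP_SIZE 2 and two
   load nodes, the permutations { 0, 0 } and { 1, 1 } are supported; each
   node reads a single place of the chain GROUP_SIZE times and the places
   0 .. GROUP_SIZE-1 are covered exactly once across the nodes.  */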

/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}

/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                             instance, child, prologue_cost_vec,
                             ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ???  Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                      stmt_info, 0, vect_body);

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt)
          && (dt == vect_constant_def || dt == vect_external_def))
        record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                          stmt_info, 0, vect_prologue);
    }
}

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
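  /* For instance, NUNITS 4 and GROUP_SIZE 6 give
     least_common_multiple (4, 6) / 4 = 3 copies, while GROUP_SIZE 8
     gives 8 / 4 = 2.  */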
  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
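  /* For instance, NUNITS 4 and GROUP_SIZE 2 give an unrolling factor of
     least_common_multiple (4, 2) / 2 = 2, while GROUP_SIZE 8 gives
     8 / 8 = 1, i.e. no unrolling.  */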
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, NULL, NULL, NULL,
                           max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }

      /* Compute the costs of this SLP instance.  */
      vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                             new_instance, TYPE_VECTOR_SUBPARTS (vectype));

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}

/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                  unsigned max_tree_size)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                   max_tree_size))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.\n");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                       max_tree_size))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
                                    max_tree_size))
    ok = true;

  return true;
}

/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}

/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
{
  gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
  slp_tree child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  int j;

  /* Propagate hybrid down the SLP tree.  */
  if (stype == hybrid)
    ;
  else if (HYBRID_SLP_STMT (stmt_vinfo))
    stype = hybrid;
  else
    {
      /* Check if a pure SLP stmt has uses in non-SLP stmts.  */
      gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
      if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
        FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
          if (gimple_bb (use_stmt)
              && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
              && (use_vinfo = vinfo_for_stmt (use_stmt))
              && !STMT_SLP_TYPE (use_vinfo)
              && (STMT_VINFO_RELEVANT (use_vinfo)
                  || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
                  || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
                      && STMT_VINFO_RELATED_STMT (use_vinfo)
                      && !STMT_SLP_TYPE (vinfo_for_stmt
                            (STMT_VINFO_RELATED_STMT (use_vinfo)))))
              && !(gimple_code (use_stmt) == GIMPLE_PHI
                   && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
            stype = hybrid;
    }

  if (stype == hybrid)
    STMT_SLP_TYPE (stmt_vinfo) = hybrid;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
    vect_detect_hybrid_slp_stmts (child, i, stype);
}

/* Helpers for vect_detect_hybrid_slp walking pattern stmt uses.  */

static tree
vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
{
  walk_stmt_info *wi = (walk_stmt_info *)data;
  struct loop *loopp = (struct loop *)wi->info;

  if (wi->is_lhs)
    return NULL_TREE;

  if (TREE_CODE (*tp) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (*tp))
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
      if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
          && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
        STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
    }

  return NULL_TREE;
}

static tree
vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
                          walk_stmt_info *)
{
  /* If the stmt is in a SLP instance then this isn't a reason
     to mark use definitions in other SLP instances as hybrid.  */
  if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
    *handled = true;
  return NULL_TREE;
}

/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
                     "\n");

  /* First walk all pattern stmts in the loop and mark defs of uses as
     hybrid because immediate uses in them are not recorded.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    {
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          if (STMT_VINFO_IN_PATTERN_P (stmt_info))
            {
              walk_stmt_info wi;
              memset (&wi, 0, sizeof (wi));
              wi.info = LOOP_VINFO_LOOP (loop_vinfo);
              gimple_stmt_iterator gsi2
                = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
              walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
                                vect_detect_hybrid_slp_1, &wi);
              walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
                               vect_detect_hybrid_slp_2,
                               vect_detect_hybrid_slp_1, &wi);
            }
        }
    }

  /* Then walk the SLP instance trees marking stmts with uses in
     non-SLP stmts as hybrid, also propagating hybrid down the
     SLP tree, collecting the above info on-the-fly.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
        vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
                                      i, pure_slp);
    }
}

/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}

/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  vect_destroy_datarefs (NULL, bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}

/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (bb_vinfo, child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}

/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);
        }
      else
        i++;
    }

  if (!slp_instances.length ())
    return false;

  return true;
}
2026 /* Compute the scalar cost of the SLP node NODE and its children
2027 and return it. Do not account for defs that are marked in LIFE,
2028 and update LIFE according to the uses of NODE. */
2030 static unsigned
2031 vect_bb_slp_scalar_cost (basic_block bb,
2032 slp_tree node, vec<bool, va_heap> *life)
2034 unsigned scalar_cost = 0;
2035 unsigned i;
2036 gimple stmt;
2037 slp_tree child;
2039 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2041 unsigned stmt_cost;
2042 ssa_op_iter op_iter;
2043 def_operand_p def_p;
2044 stmt_vec_info stmt_info;
2046 if ((*life)[i])
2047 continue;
2049 /* If there is a non-vectorized use of the defs then the scalar
2050 stmt is kept live, in which case we do not account it, or any
2051 required defs in the SLP children, in the scalar cost. This
2052 way the vectorization becomes more costly when compared to
2053 the scalar cost. */
2054 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2056 imm_use_iterator use_iter;
2057 gimple use_stmt;
2058 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2059 if (!is_gimple_debug (use_stmt)
2060 && (gimple_code (use_stmt) == GIMPLE_PHI
2061 || gimple_bb (use_stmt) != bb
2062 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2064 (*life)[i] = true;
2065 BREAK_FROM_IMM_USE_STMT (use_iter);
2068 if ((*life)[i])
2069 continue;
2071 stmt_info = vinfo_for_stmt (stmt);
2072 if (STMT_VINFO_DATA_REF (stmt_info))
2074 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2075 stmt_cost = vect_get_stmt_cost (scalar_load);
2076 else
2077 stmt_cost = vect_get_stmt_cost (scalar_store);
2079 else
2080 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2082 scalar_cost += stmt_cost;
2085 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2086 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2088 return scalar_cost;
2091 /* Check if vectorization of the basic block is profitable. */
2093 static bool
2094 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2096 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2097 slp_instance instance;
2098 int i, j;
2099 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2100 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2101 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2102 stmt_vec_info stmt_info = NULL;
2103 stmt_vector_for_cost body_cost_vec;
2104 stmt_info_for_cost *ci;
2106 /* Calculate vector costs. */
2107 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2109 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2111 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2113 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2114 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2115 stmt_info, ci->misalign, vect_body);
2119 /* Calculate scalar cost. */
2120 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2122 auto_vec<bool, 20> life;
2123 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
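/* LIFE has one entry per scalar stmt in the group, all initially
   false; vect_bb_slp_scalar_cost flips an entry to true when that
   stmt must stay live because of a use outside the vectorized code. */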
2124 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2125 SLP_INSTANCE_TREE (instance),
2126 &life);
2129 /* Complete the target-specific cost calculation. */
2130 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2131 &vec_inside_cost, &vec_epilogue_cost);
2133 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2135 if (dump_enabled_p ())
2137 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2138 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2139 vec_inside_cost);
2140 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2141 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2142 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2145 /* Vectorization is profitable if its cost is less than the cost of the
2146 scalar version. */
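/* For example, with vec_prologue_cost 1, vec_epilogue_cost 1 and
   vec_inside_cost 4 against scalar_cost 8, the total vector cost is
   1 + 1 + 4 = 6 < 8 and the basic block is considered profitable. */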
2147 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2148 return false;
2150 return true;
2153 /* Check if the basic block can be vectorized. */
2155 static bb_vec_info
2156 vect_slp_analyze_bb_1 (basic_block bb)
2158 bb_vec_info bb_vinfo;
2159 vec<slp_instance> slp_instances;
2160 slp_instance instance;
2161 int i;
2162 int min_vf = 2;
2163 unsigned n_stmts = 0;
2165 bb_vinfo = new_bb_vec_info (bb);
2166 if (!bb_vinfo)
2167 return NULL;
2169 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2171 if (dump_enabled_p ())
2172 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2173 "not vectorized: unhandled data-ref in basic "
2174 "block.\n");
2176 destroy_bb_vec_info (bb_vinfo);
2177 return NULL;
2180 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2182 if (dump_enabled_p ())
2183 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2184 "not vectorized: not enough data-refs in "
2185 "basic block.\n");
2187 destroy_bb_vec_info (bb_vinfo);
2188 return NULL;
2191 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2193 if (dump_enabled_p ())
2194 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2195 "not vectorized: unhandled data access in "
2196 "basic block.\n");
2198 destroy_bb_vec_info (bb_vinfo);
2199 return NULL;
2202 vect_pattern_recog (NULL, bb_vinfo);
2204 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2206 if (dump_enabled_p ())
2207 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2208 "not vectorized: bad data alignment in basic "
2209 "block.\n");
2211 destroy_bb_vec_info (bb_vinfo);
2212 return NULL;
2215 /* Check the SLP opportunities in the basic block, analyze and build SLP
2216 trees. */
2217 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2219 if (dump_enabled_p ())
2220 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2221 "not vectorized: failed to find SLP opportunities "
2222 "in basic block.\n");
2224 destroy_bb_vec_info (bb_vinfo);
2225 return NULL;
2228 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2230 /* Mark all the statements that we want to vectorize as pure SLP and
2231 relevant. */
2232 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2234 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2235 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2238 /* Mark all the statements that we do not want to vectorize. */
2239 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2240 !gsi_end_p (gsi); gsi_next (&gsi))
2242 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2243 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2244 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2247 /* Analyze dependences. At this point all stmts not participating in
2248 vectorization have to be marked. Dependence analysis assumes
2249 that we either vectorize all SLP instances or none at all. */
2250 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2252 if (dump_enabled_p ())
2253 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2254 "not vectorized: unhandled data dependence "
2255 "in basic block.\n");
2257 destroy_bb_vec_info (bb_vinfo);
2258 return NULL;
2261 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2263 if (dump_enabled_p ())
2264 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2265 "not vectorized: unsupported alignment in basic "
2266 "block.\n");
2267 destroy_bb_vec_info (bb_vinfo);
2268 return NULL;
2271 if (!vect_slp_analyze_operations (bb_vinfo))
2273 if (dump_enabled_p ())
2274 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2275 "not vectorized: bad operation in basic block.\n");
2277 destroy_bb_vec_info (bb_vinfo);
2278 return NULL;
2281 /* Cost model: check if the vectorization is worthwhile. */
2282 if (!unlimited_cost_model (NULL)
2283 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2285 if (dump_enabled_p ())
2286 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2287 "not vectorized: vectorization is not "
2288 "profitable.\n");
2290 destroy_bb_vec_info (bb_vinfo);
2291 return NULL;
2294 if (dump_enabled_p ())
2295 dump_printf_loc (MSG_NOTE, vect_location,
2296 "Basic block will be vectorized using SLP\n");
2298 return bb_vinfo;
2302 bb_vec_info
2303 vect_slp_analyze_bb (basic_block bb)
2305 bb_vec_info bb_vinfo;
2306 int insns = 0;
2307 gimple_stmt_iterator gsi;
2308 unsigned int vector_sizes;
2310 if (dump_enabled_p ())
2311 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2313 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2315 gimple stmt = gsi_stmt (gsi);
2316 if (!is_gimple_debug (stmt)
2317 && !gimple_nop_p (stmt)
2318 && gimple_code (stmt) != GIMPLE_LABEL)
2319 insns++;
2322 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2324 if (dump_enabled_p ())
2325 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2326 "not vectorized: too many instructions in "
2327 "basic block.\n");
2329 return NULL;
2332 /* Autodetect the first vector size we try. */
2333 current_vector_size = 0;
2334 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2336 while (1)
2338 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2339 if (bb_vinfo)
2340 return bb_vinfo;
2342 destroy_bb_vec_info (bb_vinfo);
2344 vector_sizes &= ~current_vector_size;
2345 if (vector_sizes == 0
2346 || current_vector_size == 0)
2347 return NULL;
2349 /* Try the largest remaining vector size. */
2350 current_vector_size = 1 << floor_log2 (vector_sizes);
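/* E.g., on a target advertising sizes 32 and 16 where the 32-byte
   attempt failed, vector_sizes now holds only 16, and
   1 << floor_log2 (16) selects 16 for the retry. */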
2351 if (dump_enabled_p ())
2352 dump_printf_loc (MSG_NOTE, vect_location,
2353 "***** Re-trying analysis with "
2354 "vector size %d\n", current_vector_size);
2359 /* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
2360 the number of created vector stmts depends on the unrolling factor).
2361 However, the actual number of vector stmts for every SLP node depends on
2362 VF, which is set later in vect_analyze_loop_operations (). Hence, SLP costs
2363 should be updated. In this function we assume that the inside costs
2364 calculated in vect_model_xxx_cost are linear in ncopies. */
2366 void
2367 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2369 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2370 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2371 slp_instance instance;
2372 stmt_vector_for_cost body_cost_vec;
2373 stmt_info_for_cost *si;
2374 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2376 if (dump_enabled_p ())
2377 dump_printf_loc (MSG_NOTE, vect_location,
2378 "=== vect_update_slp_costs_according_to_vf ===\n");
2380 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2382 /* We assume that costs are linear in ncopies. */
2383 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
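/* E.g., with VF 8 and an SLP unrolling factor of 2, every cost entry
   recorded for the instance body is counted ncopies = 4 times below. */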
2385 /* Record the instance's instructions in the target cost model.
2386 This was delayed until here because the count of instructions
2387 isn't known beforehand. */
2388 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2390 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2391 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2392 vinfo_for_stmt (si->stmt), si->misalign,
2393 vect_body);
2398 /* For constant and loop invariant defs of SLP_NODE this function returns
2399 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2400 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2401 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2402 REDUC_INDEX is the index of the reduction operand in the statements, unless
2403 it is -1. */
2405 static void
2406 vect_get_constant_vectors (tree op, slp_tree slp_node,
2407 vec<tree> *vec_oprnds,
2408 unsigned int op_num, unsigned int number_of_vectors,
2409 int reduc_index)
2411 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2412 gimple stmt = stmts[0];
2413 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2414 unsigned nunits;
2415 tree vec_cst;
2416 tree *elts;
2417 unsigned j, number_of_places_left_in_vector;
2418 tree vector_type;
2419 tree vop;
2420 int group_size = stmts.length ();
2421 unsigned int vec_num, i;
2422 unsigned number_of_copies = 1;
2423 vec<tree> voprnds;
2424 voprnds.create (number_of_vectors);
2425 bool constant_p, is_store;
2426 tree neutral_op = NULL;
2427 enum tree_code code = gimple_expr_code (stmt);
2428 gimple def_stmt;
2429 struct loop *loop;
2430 gimple_seq ctor_seq = NULL;
2432 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2433 && reduc_index != -1)
2435 op_num = reduc_index - 1;
2436 op = gimple_op (stmt, reduc_index);
2437 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2438 we need either neutral operands or the original operands. See
2439 get_initial_def_for_reduction() for details. */
2440 switch (code)
2442 case WIDEN_SUM_EXPR:
2443 case DOT_PROD_EXPR:
2444 case PLUS_EXPR:
2445 case MINUS_EXPR:
2446 case BIT_IOR_EXPR:
2447 case BIT_XOR_EXPR:
2448 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2449 neutral_op = build_real (TREE_TYPE (op), dconst0);
2450 else
2451 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2453 break;
2455 case MULT_EXPR:
2456 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2457 neutral_op = build_real (TREE_TYPE (op), dconst1);
2458 else
2459 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2461 break;
2463 case BIT_AND_EXPR:
2464 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2465 break;
2467 /* For MIN/MAX we don't have an easy neutral operand but
2468 the initial values can be used here just fine. Only for
2469 a reduction chain do we have to force a neutral element. */
2470 case MAX_EXPR:
2471 case MIN_EXPR:
2472 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2473 neutral_op = NULL;
2474 else
2476 def_stmt = SSA_NAME_DEF_STMT (op);
2477 loop = (gimple_bb (stmt))->loop_father;
2478 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2479 loop_preheader_edge (loop));
2481 break;
2483 default:
2484 neutral_op = NULL;
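/* E.g., for a PLUS_EXPR reduction the extra unrolled copies are seeded
   with the neutral value 0 (and MULT_EXPR copies with 1), so the added
   lanes do not perturb the final reduction result. */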
2488 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2490 is_store = true;
2491 op = gimple_assign_rhs1 (stmt);
2493 else
2494 is_store = false;
2496 gcc_assert (op);
2498 if (CONSTANT_CLASS_P (op))
2499 constant_p = true;
2500 else
2501 constant_p = false;
2503 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2504 gcc_assert (vector_type);
2505 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2507 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2508 created vectors. It is greater than 1 if unrolling is performed.
2510 For example, we have two scalar operands, s1 and s2 (e.g., group of
2511 strided accesses of size two), while NUNITS is four (i.e., four scalars
2512 of this type can be packed in a vector). The output vector will contain
2513 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2514 will be 2).
2516 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2517 containing the operands.
2519 For example, NUNITS is four as before, and the group size is 8
2520 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2521 {s5, s6, s7, s8}. */
2523 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
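/* With the examples above: least_common_multiple (4, 2) / 2 = 2 copies
   of {s1, s2} fill one vector, while least_common_multiple (4, 8) / 8 = 1,
   i.e. each scalar appears exactly once across the two vectors. */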
2525 number_of_places_left_in_vector = nunits;
2526 elts = XALLOCAVEC (tree, nunits);
2527 for (j = 0; j < number_of_copies; j++)
2529 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2531 if (is_store)
2532 op = gimple_assign_rhs1 (stmt);
2533 else
2535 switch (code)
2537 case COND_EXPR:
2538 if (op_num == 0 || op_num == 1)
2540 tree cond = gimple_assign_rhs1 (stmt);
2541 op = TREE_OPERAND (cond, op_num);
2543 else
2545 if (op_num == 2)
2546 op = gimple_assign_rhs2 (stmt);
2547 else
2548 op = gimple_assign_rhs3 (stmt);
2550 break;
2552 case CALL_EXPR:
2553 op = gimple_call_arg (stmt, op_num);
2554 break;
2556 case LSHIFT_EXPR:
2557 case RSHIFT_EXPR:
2558 case LROTATE_EXPR:
2559 case RROTATE_EXPR:
2560 op = gimple_op (stmt, op_num + 1);
2561 /* Unlike the other binary operators, shifts/rotates take a
2562 shift count of type int instead of the same type as the
2563 lhs, so make sure the scalar has the right type if we
2564 are dealing with vectors of
2565 long long/long/short/char. */
2566 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2567 op = fold_convert (TREE_TYPE (vector_type), op);
2568 break;
2570 default:
2571 op = gimple_op (stmt, op_num + 1);
2572 break;
2576 if (reduc_index != -1)
2578 loop = (gimple_bb (stmt))->loop_father;
2579 def_stmt = SSA_NAME_DEF_STMT (op);
2581 gcc_assert (loop);
2583 /* Get the def before the loop. In a reduction chain we have only
2584 one initial value. */
2585 if ((j != (number_of_copies - 1)
2586 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2587 && i != 0))
2588 && neutral_op)
2589 op = neutral_op;
2590 else
2591 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2592 loop_preheader_edge (loop));
2595 /* Create 'vect_ = {op0,op1,...,opn}'. */
2596 number_of_places_left_in_vector--;
2597 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2599 if (CONSTANT_CLASS_P (op))
2601 op = fold_unary (VIEW_CONVERT_EXPR,
2602 TREE_TYPE (vector_type), op);
2603 gcc_assert (op && CONSTANT_CLASS_P (op));
2605 else
2607 tree new_temp
2608 = make_ssa_name (TREE_TYPE (vector_type), NULL);
2609 gimple init_stmt;
2610 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
2611 op);
2612 init_stmt
2613 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR,
2614 new_temp, op, NULL_TREE);
2615 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2616 op = new_temp;
2619 elts[number_of_places_left_in_vector] = op;
2620 if (!CONSTANT_CLASS_P (op))
2621 constant_p = false;
2623 if (number_of_places_left_in_vector == 0)
2625 number_of_places_left_in_vector = nunits;
2627 if (constant_p)
2628 vec_cst = build_vector (vector_type, elts);
2629 else
2631 vec<constructor_elt, va_gc> *v;
2632 unsigned k;
2633 vec_alloc (v, nunits);
2634 for (k = 0; k < nunits; ++k)
2635 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2636 vec_cst = build_constructor (vector_type, v);
2638 voprnds.quick_push (vect_init_vector (stmt, vec_cst,
2639 vector_type, NULL));
2640 if (ctor_seq != NULL)
2642 gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
2643 gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
2644 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2645 GSI_SAME_STMT);
2646 ctor_seq = NULL;
2652 /* Since the vectors were created in reverse order, we have to
2653 reverse them here. */
2654 vec_num = voprnds.length ();
2655 for (j = vec_num; j != 0; j--)
2657 vop = voprnds[j - 1];
2658 vec_oprnds->quick_push (vop);
2661 voprnds.release ();
2663 /* If VF is greater than the unrolling factor needed for the SLP
2664 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
2665 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2666 to replicate the vectors. */
2667 while (number_of_vectors > vec_oprnds->length ())
2669 tree neutral_vec = NULL;
2671 if (neutral_op)
2673 if (!neutral_vec)
2674 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2676 vec_oprnds->quick_push (neutral_vec);
2678 else
2680 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2681 vec_oprnds->quick_push (vop);
2687 /* Get vectorized definitions from SLP_NODE that contains the
2688 corresponding vectorized def-stmts. */
2690 static void
2691 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2693 tree vec_oprnd;
2694 gimple vec_def_stmt;
2695 unsigned int i;
2697 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2699 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2701 gcc_assert (vec_def_stmt);
2702 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2703 vec_oprnds->quick_push (vec_oprnd);
2708 /* Get vectorized definitions for SLP_NODE.
2709 If the scalar definitions are loop invariants or constants, collect them and
2710 call vect_get_constant_vectors() to create vector stmts.
2711 Otherwise, the def-stmts must already be vectorized and the vectorized stmts
2712 must be stored in the corresponding child of SLP_NODE, and we call
2713 vect_get_slp_vect_defs () to retrieve them. */
2715 void
2716 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2717 vec<vec<tree> > *vec_oprnds, int reduc_index)
2719 gimple first_stmt;
2720 int number_of_vects = 0, i;
2721 unsigned int child_index = 0;
2722 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2723 slp_tree child = NULL;
2724 vec<tree> vec_defs;
2725 tree oprnd;
2726 bool vectorized_defs;
2728 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2729 FOR_EACH_VEC_ELT (ops, i, oprnd)
2731 /* For each operand we check if it has vectorized definitions in a child
2732 node or we need to create them (for invariants and constants). We
2733 check if the LHS of the first stmt of the next child matches OPRND.
2734 If it does, we found the correct child. Otherwise, we call
2735 vect_get_constant_vectors () and do not advance CHILD_INDEX, so that
2736 this child node is checked again for the next operand. */
2737 vectorized_defs = false;
2738 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2740 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2742 /* We have to check both pattern and original def, if available. */
2743 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2744 gimple related = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2746 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2747 || (related
2748 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2750 /* The number of vector defs is determined by the number of
2751 vector statements in the node from which we get those
2752 statements. */
2753 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2754 vectorized_defs = true;
2755 child_index++;
2759 if (!vectorized_defs)
2761 if (i == 0)
2763 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
2764 /* The number of vector stmts was calculated according to the LHS in
2765 vect_schedule_slp_instance (); fix it by replacing the LHS with the
2766 RHS, if necessary. See vect_get_smallest_scalar_type () for
2767 details. */
2768 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
2769 &rhs_size_unit);
2770 if (rhs_size_unit != lhs_size_unit)
2772 number_of_vects *= rhs_size_unit;
2773 number_of_vects /= lhs_size_unit;
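/* E.g., if the stmts produce chars from int operands, LHS_SIZE_UNIT is 1
   and RHS_SIZE_UNIT is 4, so four times as many vector defs are needed
   to cover the operands. */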
2778 /* Allocate memory for vectorized defs. */
2779 vec_defs = vNULL;
2780 vec_defs.create (number_of_vects);
2782 /* For reduction defs we call vect_get_constant_vectors (), since we are
2783 looking for initial loop invariant values. */
2784 if (vectorized_defs && reduc_index == -1)
2785 /* The defs are already vectorized. */
2786 vect_get_slp_vect_defs (child, &vec_defs);
2787 else
2788 /* Build vectors from scalar defs. */
2789 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
2790 number_of_vects, reduc_index);
2792 vec_oprnds->quick_push (vec_defs);
2794 /* For reductions, we only need initial values. */
2795 if (reduc_index != -1)
2796 return;
2801 /* Create NCOPIES permutation statements using the vector mask MASK and two
2802 input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX
2803 for the first copy, shifting by STRIDE elements of DR_CHAIN for every
2804 copy.
2805 (STRIDE is the number of vectorized stmts for NODE divided by the number of
2806 copies.)
2807 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE where
2808 the created stmts must be inserted. */
2810 static inline void
2811 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2812 tree mask, int first_vec_indx, int second_vec_indx,
2813 gimple_stmt_iterator *gsi, slp_tree node,
2814 tree vectype, vec<tree> dr_chain,
2815 int ncopies, int vect_stmts_counter)
2817 tree perm_dest;
2818 gimple perm_stmt = NULL;
2819 stmt_vec_info next_stmt_info;
2820 int i, stride;
2821 tree first_vec, second_vec, data_ref;
2823 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
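/* E.g., with 4 vector stmts in NODE and NCOPIES 2, STRIDE is 2: the
   second copy permutes the DR_CHAIN entries two slots further on. */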
2825 /* Initialize the vect stmts of NODE to properly insert the generated
2826 stmts later. */
2827 for (i = SLP_TREE_VEC_STMTS (node).length ();
2828 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2829 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
2831 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2832 for (i = 0; i < ncopies; i++)
2834 first_vec = dr_chain[first_vec_indx];
2835 second_vec = dr_chain[second_vec_indx];
2837 /* Generate the permute statement. */
2838 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
2839 first_vec, second_vec, mask);
2840 data_ref = make_ssa_name (perm_dest, perm_stmt);
2841 gimple_set_lhs (perm_stmt, data_ref);
2842 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2844 /* Store the vector statement in NODE. */
2845 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
2847 first_vec_indx += stride;
2848 second_vec_indx += stride;
2851 /* Mark the scalar stmt as vectorized. */
2852 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
2853 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
2857 /* Given FIRST_MASK_ELEMENT - the mask element in element representation,
2858 return in CURRENT_MASK_ELEMENT its equivalent in target specific
2859 representation. Check that the mask is valid and return FALSE if not.
2860 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving on to
2861 the next vector, i.e., the current first vector is not needed. */
2863 static bool
2864 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
2865 int mask_nunits, bool only_one_vec, int index,
2866 unsigned char *mask, int *current_mask_element,
2867 bool *need_next_vector, int *number_of_mask_fixes,
2868 bool *mask_fixed, bool *needs_first_vector)
2870 int i;
2872 /* Convert to target specific representation. */
2873 *current_mask_element = first_mask_element + m;
2874 /* Adjust the value in case it's a mask for second and third vectors. */
2875 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
2877 if (*current_mask_element < mask_nunits)
2878 *needs_first_vector = true;
2880 /* We have only one input vector to permute but the mask accesses values in
2881 the next vector as well. */
2882 if (only_one_vec && *current_mask_element >= mask_nunits)
2884 if (dump_enabled_p ())
2886 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2887 "permutation requires at least two vectors ");
2888 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2889 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2892 return false;
2895 /* The mask requires the next vector. */
2896 if (*current_mask_element >= mask_nunits * 2)
2898 if (*needs_first_vector || *mask_fixed)
2900 /* We either need the first vector too or have already moved to the
2901 next vector. In both cases, this permutation needs three
2902 vectors. */
2903 if (dump_enabled_p ())
2905 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2906 "permutation requires at "
2907 "least three vectors ");
2908 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2909 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2912 return false;
2915 /* We move to the next vector, dropping the first one and working with
2916 the second and the third - we need to adjust the values of the mask
2917 accordingly. */
2918 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
2920 for (i = 0; i < index; i++)
2921 mask[i] -= mask_nunits * *number_of_mask_fixes;
2923 (*number_of_mask_fixes)++;
2924 *mask_fixed = true;
2927 *need_next_vector = *mask_fixed;
2929 /* This was the last element of this mask. Start a new one. */
2930 if (index == mask_nunits - 1)
2932 *number_of_mask_fixes = 1;
2933 *mask_fixed = false;
2934 *needs_first_vector = false;
2937 return true;
2941 /* Generate vector permute statements from a list of loads in DR_CHAIN.
2942 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
2943 permute statements for the SLP node NODE of the SLP instance
2944 SLP_NODE_INSTANCE. */
2946 bool
2947 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
2948 gimple_stmt_iterator *gsi, int vf,
2949 slp_instance slp_node_instance, bool analyze_only)
2951 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
2952 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2953 tree mask_element_type = NULL_TREE, mask_type;
2954 int i, j, k, nunits, vec_index = 0, scalar_index;
2955 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2956 gimple next_scalar_stmt;
2957 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
2958 int first_mask_element;
2959 int index, unroll_factor, current_mask_element, ncopies;
2960 unsigned char *mask;
2961 bool only_one_vec = false, need_next_vector = false;
2962 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
2963 int number_of_mask_fixes = 1;
2964 bool mask_fixed = false;
2965 bool needs_first_vector = false;
2966 enum machine_mode mode;
2968 mode = TYPE_MODE (vectype);
2970 if (!can_vec_perm_p (mode, false, NULL))
2972 if (dump_enabled_p ())
2974 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2975 "no vect permute for ");
2976 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2977 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2979 return false;
2982 /* The generic VEC_PERM_EXPR code always uses an integral type of the
2983 same size as the vector element being permuted. */
2984 mask_element_type = lang_hooks.types.type_for_mode
2985 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
2986 mask_type = get_vectype_for_scalar_type (mask_element_type);
2987 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2988 mask = XALLOCAVEC (unsigned char, nunits);
2989 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2991 /* The number of vector stmts to generate based only on the SLP_NODE_INSTANCE
2992 unrolling factor. */
2993 orig_vec_stmts_num = group_size
2994 * SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
2995 if (orig_vec_stmts_num == 1)
2996 only_one_vec = true;
2998 /* The number of copies is determined by the final vectorization factor
2999 relative to the SLP_NODE_INSTANCE unrolling factor. */
3000 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
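/* In the three-load example below, GROUP_SIZE 3, unrolling factor 4 and
   NUNITS 4 give ORIG_VEC_STMTS_NUM = 3 * 4 / 4 = 3 input vectors, and a
   VF equal to the unrolling factor gives NCOPIES 1. */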
3002 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3003 return false;
3005 /* Generate permutation masks for every NODE. Number of masks for each NODE
3006 is equal to GROUP_SIZE.
3007 E.g., we have a group of three nodes with three loads from the same
3008 location in each node, and the vector size is 4. I.e., we have an
3009 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3010 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3011 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3012 ...
3014 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3015 The last mask is illegal since we assume two operands for the permute
3016 operation, and the mask element values can't be outside that range.
3017 Hence, the last mask must be converted into {2,5,5,5}.
3018 For the first two permutations we need the first and the second input
3019 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3020 we need the second and the third vectors: {b1,c1,a2,b2} and
3021 {c2,a3,b3,c3}. */
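/* The conversion drops the first vector and subtracts MASK_NUNITS (4 here)
   from each element: {6,9,9,9} - 4 = {2,5,5,5}, which now indexes into
   the second and third vectors. */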
3024 scalar_index = 0;
3025 index = 0;
3026 vect_stmts_counter = 0;
3027 vec_index = 0;
3028 first_vec_index = vec_index++;
3029 if (only_one_vec)
3030 second_vec_index = first_vec_index;
3031 else
3032 second_vec_index = vec_index++;
3034 for (j = 0; j < unroll_factor; j++)
3036 for (k = 0; k < group_size; k++)
3038 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3039 first_mask_element = i + j * group_size;
3040 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3041 nunits, only_one_vec, index,
3042 mask, &current_mask_element,
3043 &need_next_vector,
3044 &number_of_mask_fixes, &mask_fixed,
3045 &needs_first_vector))
3046 return false;
3047 mask[index++] = current_mask_element;
3049 if (index == nunits)
3051 index = 0;
3052 if (!can_vec_perm_p (mode, false, mask))
3054 if (dump_enabled_p ())
3056 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3057 vect_location,
3058 "unsupported vect permute { ");
3059 for (i = 0; i < nunits; ++i)
3060 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3061 mask[i]);
3062 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3064 return false;
3067 if (!analyze_only)
3069 int l;
3070 tree mask_vec, *mask_elts;
3071 mask_elts = XALLOCAVEC (tree, nunits);
3072 for (l = 0; l < nunits; ++l)
3073 mask_elts[l] = build_int_cst (mask_element_type,
3074 mask[l]);
3075 mask_vec = build_vector (mask_type, mask_elts);
3077 if (need_next_vector)
3079 first_vec_index = second_vec_index;
3080 second_vec_index = vec_index;
3083 next_scalar_stmt
3084 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3086 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3087 mask_vec, first_vec_index, second_vec_index,
3088 gsi, node, vectype, dr_chain,
3089 ncopies, vect_stmts_counter++);
3096 return true;
3101 /* Vectorize SLP instance tree in postorder. */
3103 static bool
3104 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3105 unsigned int vectorization_factor)
3107 gimple stmt;
3108 bool grouped_store, is_store;
3109 gimple_stmt_iterator si;
3110 stmt_vec_info stmt_info;
3111 unsigned int vec_stmts_size, nunits, group_size;
3112 tree vectype;
3113 int i;
3114 slp_tree child;
3116 if (!node)
3117 return false;
3119 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3120 vect_schedule_slp_instance (child, instance, vectorization_factor);
3122 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3123 stmt_info = vinfo_for_stmt (stmt);
3125 /* VECTYPE is the type of the destination. */
3126 vectype = STMT_VINFO_VECTYPE (stmt_info);
3127 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3128 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3130 /* For each SLP instance calculate the number of vector stmts to be created
3131 for the scalar stmts in each node of the SLP tree. The number of vector
3132 elements in one vector iteration is the number of scalar elements in
3133 one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
3134 size. */
3135 vec_stmts_size = (vectorization_factor * group_size) / nunits;
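/* E.g., with VF 4, GROUP_SIZE 2 and NUNITS 4 each node gets
   4 * 2 / 4 = 2 vector stmts. */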
3137 if (!SLP_TREE_VEC_STMTS (node).exists ())
3139 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3140 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3143 if (dump_enabled_p ())
3145 dump_printf_loc (MSG_NOTE, vect_location,
3146 "------>vectorizing SLP node starting from: ");
3147 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3148 dump_printf (MSG_NOTE, "\n");
3151 /* Loads should be inserted before the first load. */
3152 if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
3153 && STMT_VINFO_GROUPED_ACCESS (stmt_info)
3154 && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
3155 && SLP_TREE_LOAD_PERMUTATION (node).exists ())
3156 si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
3157 else if (is_pattern_stmt_p (stmt_info))
3158 si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
3159 else
3160 si = gsi_for_stmt (stmt);
3162 /* Stores should be inserted just before the last store. */
3163 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
3164 && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
3166 gimple last_store = vect_find_last_store_in_slp_instance (instance);
3167 if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
3168 last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
3169 si = gsi_for_stmt (last_store);
3172 /* Mark the first element of the reduction chain as a reduction to properly
3173 transform the node. In the analysis phase only the last element of the
3174 chain is marked as a reduction. */
3175 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3176 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3178 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3179 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3182 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3183 return is_store;
3186 /* Replace scalar calls from SLP node NODE with assignments setting their
3187 lhs to zero. For loop vectorization this is done in vectorizable_call,
3188 but for SLP it needs to be deferred until the end of vect_schedule_slp,
3189 because multiple SLP instances may refer to the same scalar stmt. */
3191 static void
3192 vect_remove_slp_scalar_calls (slp_tree node)
3194 gimple stmt, new_stmt;
3195 gimple_stmt_iterator gsi;
3196 int i;
3197 slp_tree child;
3198 tree lhs;
3199 stmt_vec_info stmt_info;
3201 if (!node)
3202 return;
3204 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3205 vect_remove_slp_scalar_calls (child);
3207 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3209 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3210 continue;
3211 stmt_info = vinfo_for_stmt (stmt);
3212 if (stmt_info == NULL
3213 || is_pattern_stmt_p (stmt_info)
3214 || !PURE_SLP_STMT (stmt_info))
3215 continue;
3216 lhs = gimple_call_lhs (stmt);
3217 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3218 set_vinfo_for_stmt (new_stmt, stmt_info);
3219 set_vinfo_for_stmt (stmt, NULL);
3220 STMT_VINFO_STMT (stmt_info) = new_stmt;
3221 gsi = gsi_for_stmt (stmt);
3222 gsi_replace (&gsi, new_stmt, false);
3223 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3227 /* Generate vector code for all SLP instances in the loop/basic block. */
3229 bool
3230 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3232 vec<slp_instance> slp_instances;
3233 slp_instance instance;
3234 unsigned int i, vf;
3235 bool is_store = false;
3237 if (loop_vinfo)
3239 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3240 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3242 else
3244 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3245 vf = 1;
3248 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3250 /* Schedule the tree of INSTANCE. */
3251 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3252 instance, vf);
3253 if (dump_enabled_p ())
3254 dump_printf_loc (MSG_NOTE, vect_location,
3255 "vectorizing stmts using SLP.\n");
3258 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3260 slp_tree root = SLP_INSTANCE_TREE (instance);
3261 gimple store;
3262 unsigned int j;
3263 gimple_stmt_iterator gsi;
3265 /* Remove scalar call stmts. Do not do this for basic-block
3266 vectorization as not all uses may be vectorized.
3267 ??? Why should this be necessary? DCE should be able to
3268 remove the stmts itself.
3269 ??? For BB vectorization we can as well remove scalar
3270 stmts starting from the SLP tree root if they have no
3271 uses. */
3272 if (loop_vinfo)
3273 vect_remove_slp_scalar_calls (root);
3275 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3276 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3278 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3279 break;
3281 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3282 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3283 /* Free the attached stmt_vec_info and remove the stmt. */
3284 gsi = gsi_for_stmt (store);
3285 unlink_stmt_vdef (store);
3286 gsi_remove (&gsi, true);
3287 release_defs (store);
3288 free_stmt_vec_info (store);
3292 return is_store;
3296 /* Vectorize the basic block. */
3298 void
3299 vect_slp_transform_bb (basic_block bb)
3301 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3302 gimple_stmt_iterator si;
3304 gcc_assert (bb_vinfo);
3306 if (dump_enabled_p ())
3307 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3309 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3311 gimple stmt = gsi_stmt (si);
3312 stmt_vec_info stmt_info;
3314 if (dump_enabled_p ())
3316 dump_printf_loc (MSG_NOTE, vect_location,
3317 "------>SLPing statement: ");
3318 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3319 dump_printf (MSG_NOTE, "\n");
3322 stmt_info = vinfo_for_stmt (stmt);
3323 gcc_assert (stmt_info);
3325 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3326 if (STMT_SLP_TYPE (stmt_info))
3328 vect_schedule_slp (NULL, bb_vinfo);
3329 break;
3333 if (dump_enabled_p ())
3334 dump_printf_loc (MSG_NOTE, vect_location,
3335 "BASIC BLOCK VECTORIZED\n");
3337 destroy_bb_vec_info (bb_vinfo);