/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2013 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location if one is found, and UNKNOWN_LOC
   if not.  */

LOC
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOC;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}

/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}

/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}

/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;

  return node;
}

/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}

/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}

/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
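
/* For illustration (the array A below is hypothetical, not taken from the
   code above): if FIRST_STMT heads the interleaving chain of loads from
   a[0], a[1], a[2], a[3], linked in that order by GROUP_NEXT_ELEMENT, the
   load from a[0] is at place 0, the load from a[2] at place 2, and a load
   belonging to a different chain yields -1.  */
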
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  */

static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int op_idx = 1;
  tree compare_rhs = NULL_TREE;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        number_of_oprnds++;
    }
  else
    return false;

  for (i = 0; i < number_of_oprnds; i++)
    {
      if (compare_rhs)
        {
          oprnd = compare_rhs;
          compare_rhs = NULL_TREE;
        }
      else
        oprnd = gimple_op (stmt, op_idx++);

      oprnd_info = (*oprnds_info)[i];

      if (COMPARISON_CLASS_P (oprnd))
        {
          compare_rhs = TREE_OPERAND (oprnd, 1);
          oprnd = TREE_OPERAND (oprnd, 0);
        }

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return false;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return false;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return false;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return false;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return false;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return false;
        }
    }

  return true;
}

/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are of supported types of operation.  Return true if
   they are, otherwise return false and indicate in *MATCHES which stmts
   are not isomorphic to the first one.  If MATCHES[0] is false then this
   indicates that the comparison could not be carried out or that the
   stmts will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches)
{
  unsigned int i;
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (is_gimple_call (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (stmt)
              || gimple_call_tail_p (stmt)
              || gimple_call_noreturn_p (stmt)
              || !gimple_call_nothrow_p (stmt)
              || gimple_call_chain (stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ???  We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                   && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  return true;
}
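
/* A sketch of what the check above accepts and rejects (hypothetical
   arrays, not taken from this file): the group
       a[0] = b[0] + c[0];
       a[1] = b[1] + c[1];
   is isomorphic, so MATCHES becomes {true, true}; whereas
       a[0] = b[0] + c[0];
       a[1] = b[1] - c[1];
   differs in the operation code of the second stmt, so the function
   returns false with MATCHES = {true, false}.  */
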
/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise
   return TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes)
{
  unsigned nops, i, this_npermutes = 0;
  gimple stmt;

  if (!matches)
    matches = XALLOCAVEC (bool, group_size);
  if (!npermutes)
    npermutes = &this_npermutes;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches))
    return false;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                        stmt, (i == 0), &oprnds_info))
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }
    }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      bool *matches = XALLOCAVEC (bool, group_size);
      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches, npermutes))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commuted try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          /* Do so only if the number of unsuccessful permutes was not
             more than a cut-off, as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          /* Swap mismatched definition stmts.  */
          for (unsigned j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
              }
          /* And try again ... */
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   matches, npermutes))
            {
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  vect_free_oprnd_info (oprnds_info);
  return true;
}
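
/* An illustration of the operand-swapping retry above (hypothetical
   arrays): for
       a[0] = b[0] + c[0];
       a[1] = c[1] + b[1];
   building the operand-zero child fails for lane 1 (b versus c), but
   since PLUS_EXPR is commutative the def-stmts of the mismatched lane
   are swapped between operands zero and one and the build is retried,
   which then succeeds.  */
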
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}

/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}

/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}

/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}

/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (i = 0; i < group_size; ++i)
            dump_printf (MSG_NOTE, "%d ", i);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check that for every node in the instance the loads
         form a subchain.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                return false;
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ???  This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}
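
/* A sketch of the FORNOW rule above (a hypothetical layout, not derived
   from this file): with GROUP_SIZE == 2, the pair of load permutations
   {0, 0} and {1, 1} is accepted, since each node uses a single element of
   the chain in all lanes and together the nodes cover elements 0 and 1;
   a node carrying {1, 0} fails the "all indices equal" test above and is
   rejected.  */
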
/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}

/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                             instance, child, prologue_cost_vec,
                             ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ???  Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                      stmt_info, 0, vect_body);

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt)
          && (dt == vect_constant_def || dt == vect_external_def))
        record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                          stmt_info, 0, vect_prologue);
    }
}

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
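
  /* A worked instance with made-up numbers: GROUP_SIZE == 8 and
     NUNITS == 4 give least_common_multiple (4, 8) / 4 == 2 vector stmts
     per scalar stmt of the group; GROUP_SIZE == 6 and NUNITS == 4 give
     an lcm of 12 and hence 3 copies.  */
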
  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
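  /* A worked instance with made-up numbers: a group of 2 stores with
     NUNITS == 4 gives least_common_multiple (4, 2) / 2 == 2, i.e. two
     loop iterations are needed to fill one vector; a group of 8 with
     NUNITS == 4 needs no unrolling (factor 1).  */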
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, NULL, NULL))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }

      /* Compute the costs of this SLP instance.  */
      vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                             new_instance, TYPE_VECTOR_SUBPARTS (vectype));

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}

/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.\n");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0]))
    ok = true;

  return true;
}

/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}

/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (node);
  gimple stmt = stmts[0];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  slp_tree child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = NULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  basic_block bb = NULL;

  if (!node)
    return;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
  else
    bb = BB_VINFO_BB (bb_vinfo);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
        && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
        if (gimple_bb (use_stmt)
            && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                || bb == gimple_bb (use_stmt))
            && (stmt_vinfo = vinfo_for_stmt (use_stmt))
            && !STMT_SLP_TYPE (stmt_vinfo)
            && (STMT_VINFO_RELEVANT (stmt_vinfo)
                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo)))
            && !(gimple_code (use_stmt) == GIMPLE_PHI
                 && STMT_VINFO_DEF_TYPE (stmt_vinfo)
                      == vect_reduction_def))
          vect_mark_slp_stmts (node, hybrid, i);

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_detect_hybrid_slp_stmts (child);
}

/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}

/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}

/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  vect_destroy_datarefs (NULL, bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}

/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (bb_vinfo, child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}

/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);
        }
      else
        i++;
    }

  if (!slp_instances.length ())
    return false;

  return true;
}

/* Compute the scalar cost of the SLP node NODE and its children
   and return it.  Do not account defs that are marked in LIFE and
   update LIFE according to uses of NODE.  */

static unsigned
vect_bb_slp_scalar_cost (basic_block bb,
                         slp_tree node, vec<bool, va_stack> life)
{
  unsigned scalar_cost = 0;
  unsigned i;
  gimple stmt;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      unsigned stmt_cost;
      ssa_op_iter op_iter;
      def_operand_p def_p;
      stmt_vec_info stmt_info;

      if (life[i])
        continue;

      /* If there is a non-vectorized use of the defs then the scalar
         stmt is kept live in which case we do not account it or any
         required defs in the SLP children in the scalar cost.  This
         way we make the vectorization more costly when compared to
         the scalar cost.  */
      FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
        {
          imm_use_iterator use_iter;
          gimple use_stmt;
          FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
            if (gimple_code (use_stmt) == GIMPLE_PHI
                || gimple_bb (use_stmt) != bb
                || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt)))
              {
                life[i] = true;
                BREAK_FROM_IMM_USE_STMT (use_iter);
              }
        }
      if (life[i])
        continue;

      stmt_info = vinfo_for_stmt (stmt);
      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = vect_get_stmt_cost (scalar_load);
          else
            stmt_cost = vect_get_stmt_cost (scalar_store);
        }
      else
        stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);

  return scalar_cost;
}

1991 /* Check if vectorization of the basic block is profitable. */
1993 static bool
1994 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
1996 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
1997 slp_instance instance;
1998 int i, j;
1999 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2000 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2001 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2002 stmt_vec_info stmt_info = NULL;
2003 stmt_vector_for_cost body_cost_vec;
2004 stmt_info_for_cost *ci;
2006 /* Calculate vector costs. */
2007 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2009 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2011 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2013 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2014 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2015 stmt_info, ci->misalign, vect_body);
2019 /* Calculate scalar cost. */
2020 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2022 vec<bool, va_stack> life;
2023 vec_stack_alloc (bool, life, SLP_INSTANCE_GROUP_SIZE (instance));
2024 life.quick_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2025 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2026 SLP_INSTANCE_TREE (instance),
2027 life);
2028 life.release ();
2031 /* Complete the target-specific cost calculation. */
2032 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2033 &vec_inside_cost, &vec_epilogue_cost);
2035 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2037 if (dump_enabled_p ())
2039 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis:\n");
2040 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2041 vec_inside_cost);
2042 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2043 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2044 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2047 /* Vectorization is profitable if its cost is less than the cost of the
2048 scalar version. */
2049 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2050 return false;
2052 return true;
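/* Worked example with hypothetical numbers: for vec_inside_cost == 6,
   vec_prologue_cost == 1 and vec_epilogue_cost == 1 we compare
   1 + 1 + 6 == 8 against scalar_cost; with scalar_cost == 10 the test
   8 >= 10 fails and vectorization is considered profitable, while with
   scalar_cost == 8 the tie 8 >= 8 rejects it.  */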
2055 /* Check if the basic block can be vectorized. */
2057 static bb_vec_info
2058 vect_slp_analyze_bb_1 (basic_block bb)
2060 bb_vec_info bb_vinfo;
2061 vec<slp_instance> slp_instances;
2062 slp_instance instance;
2063 int i;
2064 int min_vf = 2;
2066 bb_vinfo = new_bb_vec_info (bb);
2067 if (!bb_vinfo)
2068 return NULL;
2070 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
2072 if (dump_enabled_p ())
2073 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2074 "not vectorized: unhandled data-ref in basic "
2075 "block.\n");
2077 destroy_bb_vec_info (bb_vinfo);
2078 return NULL;
2081 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2083 if (dump_enabled_p ())
2084 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2085 "not vectorized: not enough data-refs in "
2086 "basic block.\n");
2088 destroy_bb_vec_info (bb_vinfo);
2089 return NULL;
2092 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2094 if (dump_enabled_p ())
2095 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2096 "not vectorized: unhandled data access in "
2097 "basic block.\n");
2099 destroy_bb_vec_info (bb_vinfo);
2100 return NULL;
2103 vect_pattern_recog (NULL, bb_vinfo);
2105 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2107 if (dump_enabled_p ())
2108 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2109 "not vectorized: unhandled data dependence "
2110 "in basic block.\n");
2112 destroy_bb_vec_info (bb_vinfo);
2113 return NULL;
2116 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2118 if (dump_enabled_p ())
2119 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2120 "not vectorized: bad data alignment in basic "
2121 "block.\n");
2123 destroy_bb_vec_info (bb_vinfo);
2124 return NULL;
2127 /* Check the SLP opportunities in the basic block, analyze and build SLP
2128 trees. */
2129 if (!vect_analyze_slp (NULL, bb_vinfo))
2131 if (dump_enabled_p ())
2132 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2133 "not vectorized: failed to find SLP opportunities "
2134 "in basic block.\n");
2136 destroy_bb_vec_info (bb_vinfo);
2137 return NULL;
2140 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2142 /* Mark all the statements that we want to vectorize as pure SLP and
2143 relevant. */
2144 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2146 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2147 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2150 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2152 if (dump_enabled_p ())
2153 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2154 "not vectorized: unsupported alignment in basic "
2155 "block.\n");
2156 destroy_bb_vec_info (bb_vinfo);
2157 return NULL;
2160 if (!vect_slp_analyze_operations (bb_vinfo))
2162 if (dump_enabled_p ())
2163 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2164 "not vectorized: bad operation in basic block.\n");
2166 destroy_bb_vec_info (bb_vinfo);
2167 return NULL;
2170 /* Cost model: check if the vectorization is worthwhile. */
2171 if (!unlimited_cost_model ()
2172 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2174 if (dump_enabled_p ())
2175 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2176 "not vectorized: vectorization is not "
2177 "profitable.\n");
2179 destroy_bb_vec_info (bb_vinfo);
2180 return NULL;
2183 if (dump_enabled_p ())
2184 dump_printf_loc (MSG_NOTE, vect_location,
2185 "Basic block will be vectorized using SLP\n");
2187 return bb_vinfo;
2191 bb_vec_info
2192 vect_slp_analyze_bb (basic_block bb)
2194 bb_vec_info bb_vinfo;
2195 int insns = 0;
2196 gimple_stmt_iterator gsi;
2197 unsigned int vector_sizes;
2199 if (dump_enabled_p ())
2200 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2202 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2204 gimple stmt = gsi_stmt (gsi);
2205 if (!is_gimple_debug (stmt)
2206 && !gimple_nop_p (stmt)
2207 && gimple_code (stmt) != GIMPLE_LABEL)
2208 insns++;
2211 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2213 if (dump_enabled_p ())
2214 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2215 "not vectorized: too many instructions in "
2216 "basic block.\n");
2218 return NULL;
2221 /* Autodetect the first vector size to try. */
2222 current_vector_size = 0;
2223 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2225 while (1)
2227 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2228 if (bb_vinfo)
2229 return bb_vinfo;
2231 destroy_bb_vec_info (bb_vinfo);
2233 vector_sizes &= ~current_vector_size;
2234 if (vector_sizes == 0
2235 || current_vector_size == 0)
2236 return NULL;
2238 /* Try the next-largest remaining vector size. */
2239 current_vector_size = 1 << floor_log2 (vector_sizes);
2240 if (dump_enabled_p ())
2241 dump_printf_loc (MSG_NOTE, vect_location,
2242 "***** Re-trying analysis with "
2243 "vector size %d\n", current_vector_size);
2248 /* SLP costs are calculated according to the SLP instance unrolling factor
2249 (i.e., the number of created vector stmts depends on the unrolling factor).
2250 However, the actual number of vector stmts for every SLP node depends on
2251 VF, which is set later in vect_analyze_operations (). Hence, the SLP
2252 costs should be updated. In this function we assume that the inside costs
2253 calculated in vect_model_xxx_cost are linear in ncopies. */
2255 void
2256 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2258 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2259 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2260 slp_instance instance;
2261 stmt_vector_for_cost body_cost_vec;
2262 stmt_info_for_cost *si;
2263 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2265 if (dump_enabled_p ())
2266 dump_printf_loc (MSG_NOTE, vect_location,
2267 "=== vect_update_slp_costs_according_to_vf ===\n");
2269 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2271 /* We assume that costs are linear in ncopies. */
2272 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2274 /* Record the instance's instructions in the target cost model.
2275 This was delayed until here because the count of instructions
2276 isn't known beforehand. */
2277 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2279 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2280 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2281 vinfo_for_stmt (si->stmt), si->misalign,
2282 vect_body);
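/* Worked example with hypothetical numbers: for vf == 8 and an SLP instance
   with unrolling factor 2, ncopies == 8 / 2 == 4, so a body-cost entry
   recorded with count 3 is added to the target cost model as 3 * 4 == 12,
   consistent with the assumption that inside costs are linear in
   ncopies.  */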
2287 /* For constant and loop-invariant defs of SLP_NODE this function returns
2288 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2289 OP_NUM determines whether we gather defs for operand 0 or operand 1 of the
2290 RHS of the scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to
2291 create. REDUC_INDEX is the index of the reduction operand in the
2292 statements, unless it is -1. */
2294 static void
2295 vect_get_constant_vectors (tree op, slp_tree slp_node,
2296 vec<tree> *vec_oprnds,
2297 unsigned int op_num, unsigned int number_of_vectors,
2298 int reduc_index)
2300 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2301 gimple stmt = stmts[0];
2302 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2303 unsigned nunits;
2304 tree vec_cst;
2305 tree *elts;
2306 unsigned j, number_of_places_left_in_vector;
2307 tree vector_type;
2308 tree vop;
2309 int group_size = stmts.length ();
2310 unsigned int vec_num, i;
2311 unsigned number_of_copies = 1;
2312 vec<tree> voprnds;
2313 voprnds.create (number_of_vectors);
2314 bool constant_p, is_store;
2315 tree neutral_op = NULL;
2316 enum tree_code code = gimple_expr_code (stmt);
2317 gimple def_stmt;
2318 struct loop *loop;
2319 gimple_seq ctor_seq = NULL;
2321 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2322 && reduc_index != -1)
2324 op_num = reduc_index - 1;
2325 op = gimple_op (stmt, reduc_index);
2326 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2327 we need either neutral operands or the original operands. See
2328 get_initial_def_for_reduction() for details. */
2329 switch (code)
2331 case WIDEN_SUM_EXPR:
2332 case DOT_PROD_EXPR:
2333 case PLUS_EXPR:
2334 case MINUS_EXPR:
2335 case BIT_IOR_EXPR:
2336 case BIT_XOR_EXPR:
2337 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2338 neutral_op = build_real (TREE_TYPE (op), dconst0);
2339 else
2340 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2342 break;
2344 case MULT_EXPR:
2345 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2346 neutral_op = build_real (TREE_TYPE (op), dconst1);
2347 else
2348 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2350 break;
2352 case BIT_AND_EXPR:
2353 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2354 break;
2356 case MAX_EXPR:
2357 case MIN_EXPR:
2358 def_stmt = SSA_NAME_DEF_STMT (op);
2359 loop = (gimple_bb (stmt))->loop_father;
2360 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2361 loop_preheader_edge (loop));
2362 break;
2364 default:
2365 neutral_op = NULL;
2369 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2371 is_store = true;
2372 op = gimple_assign_rhs1 (stmt);
2374 else
2375 is_store = false;
2377 gcc_assert (op);
2379 if (CONSTANT_CLASS_P (op))
2380 constant_p = true;
2381 else
2382 constant_p = false;
2384 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2385 gcc_assert (vector_type);
2386 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2388 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2389 the created vectors. It is greater than 1 if unrolling is performed.
2391 For example, we have two scalar operands, s1 and s2 (e.g., group of
2392 strided accesses of size two), while NUNITS is four (i.e., four scalars
2393 of this type can be packed in a vector). The output vector will contain
2394 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2395 will be 2).
2397 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2398 containing the operands.
2400 For example, NUNITS is four as before, and the group size is 8
2401 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2402 {s5, s6, s7, s8}. */
2404 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
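/* Worked instances of the formula above, matching the comment's examples:
   for NUNITS == 4 and GROUP_SIZE == 2,
     least_common_multiple (4, 2) / 2 == 4 / 2 == 2
   copies per vector ({s1, s2, s1, s2}); for GROUP_SIZE == 8,
     least_common_multiple (4, 8) / 8 == 8 / 8 == 1,
   i.e. each scalar appears once, spread over two vectors.  */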
2406 number_of_places_left_in_vector = nunits;
2407 elts = XALLOCAVEC (tree, nunits);
2408 for (j = 0; j < number_of_copies; j++)
2410 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2412 if (is_store)
2413 op = gimple_assign_rhs1 (stmt);
2414 else
2416 switch (code)
2418 case COND_EXPR:
2419 if (op_num == 0 || op_num == 1)
2421 tree cond = gimple_assign_rhs1 (stmt);
2422 op = TREE_OPERAND (cond, op_num);
2424 else
2426 if (op_num == 2)
2427 op = gimple_assign_rhs2 (stmt);
2428 else
2429 op = gimple_assign_rhs3 (stmt);
2431 break;
2433 case CALL_EXPR:
2434 op = gimple_call_arg (stmt, op_num);
2435 break;
2437 case LSHIFT_EXPR:
2438 case RSHIFT_EXPR:
2439 case LROTATE_EXPR:
2440 case RROTATE_EXPR:
2441 op = gimple_op (stmt, op_num + 1);
2442 /* Unlike the other binary operators, shifts/rotates take
2443 an int shift count rather than an operand of the same
2444 type as the lhs, so make sure the scalar has the right
2445 type if we are dealing with vectors of
2446 long long/long/short/char. */
2447 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2448 op = fold_convert (TREE_TYPE (vector_type), op);
2449 break;
2451 default:
2452 op = gimple_op (stmt, op_num + 1);
2453 break;
2457 if (reduc_index != -1)
2459 loop = (gimple_bb (stmt))->loop_father;
2460 def_stmt = SSA_NAME_DEF_STMT (op);
2462 gcc_assert (loop);
2464 /* Get the def before the loop. In a reduction chain we have only
2465 one initial value. */
2466 if ((j != (number_of_copies - 1)
2467 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2468 && i != 0))
2469 && neutral_op)
2470 op = neutral_op;
2471 else
2472 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2473 loop_preheader_edge (loop));
2476 /* Create 'vect_ = {op0,op1,...,opn}'. */
2477 number_of_places_left_in_vector--;
2478 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2480 if (CONSTANT_CLASS_P (op))
2482 op = fold_unary (VIEW_CONVERT_EXPR,
2483 TREE_TYPE (vector_type), op);
2484 gcc_assert (op && CONSTANT_CLASS_P (op));
2486 else
2488 tree new_temp
2489 = make_ssa_name (TREE_TYPE (vector_type), NULL);
2490 gimple init_stmt;
2491 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
2492 op);
2493 init_stmt
2494 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR,
2495 new_temp, op, NULL_TREE);
2496 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2497 op = new_temp;
2500 elts[number_of_places_left_in_vector] = op;
2501 if (!CONSTANT_CLASS_P (op))
2502 constant_p = false;
2504 if (number_of_places_left_in_vector == 0)
2506 number_of_places_left_in_vector = nunits;
2508 if (constant_p)
2509 vec_cst = build_vector (vector_type, elts);
2510 else
2512 vec<constructor_elt, va_gc> *v;
2513 unsigned k;
2514 vec_alloc (v, nunits);
2515 for (k = 0; k < nunits; ++k)
2516 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2517 vec_cst = build_constructor (vector_type, v);
2519 voprnds.quick_push (vect_init_vector (stmt, vec_cst,
2520 vector_type, NULL));
2521 if (ctor_seq != NULL)
2523 gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
2524 gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
2525 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2526 GSI_SAME_STMT);
2527 ctor_seq = NULL;
2533 /* Since the vectors were created in reverse order, put them back into
2534 the correct order. */
2535 vec_num = voprnds.length ();
2536 for (j = vec_num; j != 0; j--)
2538 vop = voprnds[j - 1];
2539 vec_oprnds->quick_push (vop);
2542 voprnds.release ();
2544 /* If VF is greater than the unrolling factor needed for the SLP group of
2545 stmts, the NUMBER_OF_VECTORS to be created is greater than
2546 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2547 to replicate the vectors. */
2548 while (number_of_vectors > vec_oprnds->length ())
2550 tree neutral_vec = NULL;
2552 if (neutral_op)
2554 if (!neutral_vec)
2555 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2557 vec_oprnds->quick_push (neutral_vec);
2559 else
2561 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2562 vec_oprnds->quick_push (vop);
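/* Illustrative example with hypothetical numbers: if two vectors v0 and v1
   were built but NUMBER_OF_VECTORS == 4, the loop above yields
   {v0, v1, v0, v1}; for a reduction with a neutral element the extra slots
   are instead filled with the neutral vector, e.g. {0, ..., 0} for
   PLUS_EXPR.  */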
2568 /* Get the vectorized definitions from SLP_NODE, which contains the
2569 corresponding vectorized def-stmts. */
2571 static void
2572 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2574 tree vec_oprnd;
2575 gimple vec_def_stmt;
2576 unsigned int i;
2578 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2580 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2582 gcc_assert (vec_def_stmt);
2583 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2584 vec_oprnds->quick_push (vec_oprnd);
2589 /* Get vectorized definitions for SLP_NODE.
2590 If the scalar definitions are loop invariants or constants, collect them and
2591 call vect_get_constant_vectors() to create vector stmts.
2592 Otherwise, the def-stmts must already be vectorized and the vectorized stmts
2593 must be stored in the corresponding child of SLP_NODE, and we call
2594 vect_get_slp_vect_defs () to retrieve them. */
2596 void
2597 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2598 vec<vec<tree> > *vec_oprnds, int reduc_index)
2600 gimple first_stmt;
2601 int number_of_vects = 0, i;
2602 unsigned int child_index = 0;
2603 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2604 slp_tree child = NULL;
2605 vec<tree> vec_defs;
2606 tree oprnd;
2607 bool vectorized_defs;
2609 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2610 FOR_EACH_VEC_ELT (ops, i, oprnd)
2612 /* For each operand we check if it has vectorized definitions in a child
2613 node, or whether we need to create them (for invariants and constants).
2614 We check if the LHS of the first stmt of the next child matches OPRND.
2615 If it does, we have found the correct child. Otherwise, we call
2616 vect_get_constant_vectors () and do not advance CHILD_INDEX, so that
2617 this child node is checked again for the next operand. */
2618 vectorized_defs = false;
2619 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2621 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2623 /* We have to check both pattern and original def, if available. */
2624 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2625 gimple related = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2627 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2628 || (related
2629 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2631 /* The number of vector defs is determined by the number of
2632 vector statements in the node from which we get those
2633 statements. */
2634 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2635 vectorized_defs = true;
2636 child_index++;
2640 if (!vectorized_defs)
2642 if (i == 0)
2644 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
2645 /* The number of vector stmts was calculated according to LHS in
2646 vect_schedule_slp_instance (); fix it by replacing LHS with
2647 RHS, if necessary. See vect_get_smallest_scalar_type () for
2648 details. */
2649 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
2650 &rhs_size_unit);
2651 if (rhs_size_unit != lhs_size_unit)
2653 number_of_vects *= rhs_size_unit;
2654 number_of_vects /= lhs_size_unit;
2659 /* Allocate memory for vectorized defs. */
2660 vec_defs = vNULL;
2661 vec_defs.create (number_of_vects);
2663 /* For reduction defs we call vect_get_constant_vectors (), since we are
2664 looking for initial loop invariant values. */
2665 if (vectorized_defs && reduc_index == -1)
2666 /* The defs are already vectorized. */
2667 vect_get_slp_vect_defs (child, &vec_defs);
2668 else
2669 /* Build vectors from scalar defs. */
2670 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
2671 number_of_vects, reduc_index);
2673 vec_oprnds->quick_push (vec_defs);
2675 /* For reductions, we only need initial values. */
2676 if (reduc_index != -1)
2677 return;
2682 /* Create NCOPIES permutation statements using the vector mask MASK and two
2683 input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for
2684 the first copy, shifting by STRIDE elements of DR_CHAIN for every
2685 subsequent copy.
2686 (STRIDE is the number of vectorized stmts for NODE divided by the number of
2687 copies.)
2688 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE at
2689 which the created stmts must be stored. */
2691 static inline void
2692 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2693 tree mask, int first_vec_indx, int second_vec_indx,
2694 gimple_stmt_iterator *gsi, slp_tree node,
2695 tree vectype, vec<tree> dr_chain,
2696 int ncopies, int vect_stmts_counter)
2698 tree perm_dest;
2699 gimple perm_stmt = NULL;
2700 stmt_vec_info next_stmt_info;
2701 int i, stride;
2702 tree first_vec, second_vec, data_ref;
2704 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
2706 /* Initialize the vect stmts of NODE so that the generated stmts can later
2707 be inserted at the proper positions. */
2708 for (i = SLP_TREE_VEC_STMTS (node).length ();
2709 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2710 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
2712 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2713 for (i = 0; i < ncopies; i++)
2715 first_vec = dr_chain[first_vec_indx];
2716 second_vec = dr_chain[second_vec_indx];
2718 /* Generate the permute statement. */
2719 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
2720 first_vec, second_vec, mask);
2721 data_ref = make_ssa_name (perm_dest, perm_stmt);
2722 gimple_set_lhs (perm_stmt, data_ref);
2723 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2725 /* Store the vector statement in NODE. */
2726 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
2728 first_vec_indx += stride;
2729 second_vec_indx += stride;
2732 /* Mark the scalar stmt as vectorized. */
2733 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
2734 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
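/* A minimal model of the VEC_PERM_EXPR generated above, not the GCC
   implementation: element I of the result selects from the concatenation of
   the two input vectors, assuming all mask elements are below 2 * N.

     static void
     toy_vec_perm (int n, const int *v0, const int *v1,
                   const unsigned char *sel, int *out)
     {
       int i;
       for (i = 0; i < n; i++)
         out[i] = sel[i] < n ? v0[sel[i]] : v1[sel[i] - n];
     }

   This is why vect_get_mask_element () has to adjust or reject masks that
   would need a third input vector.  */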
2738 /* Given FIRST_MASK_ELEMENT, the mask element in element representation,
2739 return in CURRENT_MASK_ELEMENT its equivalent in the target-specific
2740 representation. Check that the mask is valid and return FALSE if not.
2741 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving on to
2742 the next vector, i.e., the current first vector is no longer needed. */
2744 static bool
2745 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
2746 int mask_nunits, bool only_one_vec, int index,
2747 unsigned char *mask, int *current_mask_element,
2748 bool *need_next_vector, int *number_of_mask_fixes,
2749 bool *mask_fixed, bool *needs_first_vector)
2751 int i;
2753 /* Convert to target specific representation. */
2754 *current_mask_element = first_mask_element + m;
2755 /* Adjust the value in case it's a mask for the second and third vectors. */
2756 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
2758 if (*current_mask_element < mask_nunits)
2759 *needs_first_vector = true;
2761 /* We have only one input vector to permute but the mask accesses values in
2762 the next vector as well. */
2763 if (only_one_vec && *current_mask_element >= mask_nunits)
2765 if (dump_enabled_p ())
2767 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2768 "permutation requires at least two vectors ");
2769 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2770 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2773 return false;
2776 /* The mask requires the next vector. */
2777 if (*current_mask_element >= mask_nunits * 2)
2779 if (*needs_first_vector || *mask_fixed)
2781 /* We either need the first vector too or have already moved to the
2782 next vector. In both cases, this permutation needs three
2783 vectors. */
2784 if (dump_enabled_p ())
2786 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2787 "permutation requires at "
2788 "least three vectors ");
2789 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2790 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2793 return false;
2796 /* We move to the next vector, dropping the first one and working with
2797 the second and the third - we need to adjust the values of the mask
2798 accordingly. */
2799 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
2801 for (i = 0; i < index; i++)
2802 mask[i] -= mask_nunits * *number_of_mask_fixes;
2804 (*number_of_mask_fixes)++;
2805 *mask_fixed = true;
2808 *need_next_vector = *mask_fixed;
2810 /* This was the last element of this mask. Start a new one. */
2811 if (index == mask_nunits - 1)
2813 *number_of_mask_fixes = 1;
2814 *mask_fixed = false;
2815 *needs_first_vector = false;
2818 return true;
2822 /* Generate vector permute statements from a list of loads in DR_CHAIN.
2823 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
2824 permute statements for the SLP node NODE of the SLP instance
2825 SLP_NODE_INSTANCE. */
2827 bool
2828 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
2829 gimple_stmt_iterator *gsi, int vf,
2830 slp_instance slp_node_instance, bool analyze_only)
2832 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
2833 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2834 tree mask_element_type = NULL_TREE, mask_type;
2835 int i, j, k, nunits, vec_index = 0, scalar_index;
2836 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2837 gimple next_scalar_stmt;
2838 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
2839 int first_mask_element;
2840 int index, unroll_factor, current_mask_element, ncopies;
2841 unsigned char *mask;
2842 bool only_one_vec = false, need_next_vector = false;
2843 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
2844 int number_of_mask_fixes = 1;
2845 bool mask_fixed = false;
2846 bool needs_first_vector = false;
2847 enum machine_mode mode;
2849 mode = TYPE_MODE (vectype);
2851 if (!can_vec_perm_p (mode, false, NULL))
2853 if (dump_enabled_p ())
2855 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2856 "no vect permute for ");
2857 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2858 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2860 return false;
2863 /* The generic VEC_PERM_EXPR code always uses an integral type of the
2864 same size as the vector element being permuted. */
2865 mask_element_type = lang_hooks.types.type_for_mode
2866 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
2867 mask_type = get_vectype_for_scalar_type (mask_element_type);
2868 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2869 mask = XALLOCAVEC (unsigned char, nunits);
2870 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2872 /* The number of vector stmts to generate, based only on the
2873 SLP_NODE_INSTANCE unrolling factor. */
2874 orig_vec_stmts_num = group_size *
2875 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
2876 if (orig_vec_stmts_num == 1)
2877 only_one_vec = true;
2879 /* The number of copies is determined by the final vectorization factor
2880 relative to the SLP_NODE_INSTANCE unrolling factor. */
2881 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2883 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
2884 return false;
2886 /* Generate permutation masks for every NODE. The number of masks for each
2887 NODE is equal to GROUP_SIZE.
2888 E.g., suppose we have a group of three nodes with three loads from the
2889 same location in each node, and the vector size is 4. I.e., we have an
2890 a0b0c0a1b1c1... sequence and we need to create the following vectors:
2891 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
2892 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
2895 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
2896 The last mask is illegal since we assume two operands for the permute
2897 operation, and the mask element values can't index outside those two vectors.
2898 Hence, the last mask must be converted into {2,5,5,5}.
2899 For the first two permutations we need the first and the second input
2900 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
2901 we need the second and the third vectors: {b1,c1,a2,b2} and
2902 {c2,a3,b3,c3}. */
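/* Checking the a's example above (illustrative arithmetic only): with
   GROUP_SIZE == 3 and a load-permutation index of 0, the a's occupy flat
   indices first_mask_element == 0 + j * 3, i.e. 0, 3, 6, 9.  Packed four per
   mask this gives {0,0,0,3}, {3,3,6,6} and {6,9,9,9}; the last mask spans
   the third input vector, so relative to the second and third vectors (base
   index 4) it is rewritten as {2,5,5,5}.  */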
2905 scalar_index = 0;
2906 index = 0;
2907 vect_stmts_counter = 0;
2908 vec_index = 0;
2909 first_vec_index = vec_index++;
2910 if (only_one_vec)
2911 second_vec_index = first_vec_index;
2912 else
2913 second_vec_index = vec_index++;
2915 for (j = 0; j < unroll_factor; j++)
2917 for (k = 0; k < group_size; k++)
2919 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
2920 first_mask_element = i + j * group_size;
2921 if (!vect_get_mask_element (stmt, first_mask_element, 0,
2922 nunits, only_one_vec, index,
2923 mask, &current_mask_element,
2924 &need_next_vector,
2925 &number_of_mask_fixes, &mask_fixed,
2926 &needs_first_vector))
2927 return false;
2928 mask[index++] = current_mask_element;
2930 if (index == nunits)
2932 index = 0;
2933 if (!can_vec_perm_p (mode, false, mask))
2935 if (dump_enabled_p ())
2937 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
2938 vect_location,
2939 "unsupported vect permute { ");
2940 for (i = 0; i < nunits; ++i)
2941 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
2942 mask[i]);
2943 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
2945 return false;
2948 if (!analyze_only)
2950 int l;
2951 tree mask_vec, *mask_elts;
2952 mask_elts = XALLOCAVEC (tree, nunits);
2953 for (l = 0; l < nunits; ++l)
2954 mask_elts[l] = build_int_cst (mask_element_type,
2955 mask[l]);
2956 mask_vec = build_vector (mask_type, mask_elts);
2958 if (need_next_vector)
2960 first_vec_index = second_vec_index;
2961 second_vec_index = vec_index;
2964 next_scalar_stmt
2965 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
2967 vect_create_mask_and_perm (stmt, next_scalar_stmt,
2968 mask_vec, first_vec_index, second_vec_index,
2969 gsi, node, vectype, dr_chain,
2970 ncopies, vect_stmts_counter++);
2977 return true;
2982 /* Vectorize the SLP instance tree in postorder. */
2984 static bool
2985 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
2986 unsigned int vectorization_factor)
2988 gimple stmt;
2989 bool grouped_store, is_store;
2990 gimple_stmt_iterator si;
2991 stmt_vec_info stmt_info;
2992 unsigned int vec_stmts_size, nunits, group_size;
2993 tree vectype;
2994 int i;
2995 slp_tree child;
2997 if (!node)
2998 return false;
3000 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3001 vect_schedule_slp_instance (child, instance, vectorization_factor);
3003 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3004 stmt_info = vinfo_for_stmt (stmt);
3006 /* VECTYPE is the type of the destination. */
3007 vectype = STMT_VINFO_VECTYPE (stmt_info);
3008 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3009 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3011 /* For each SLP instance calculate the number of vector stmts to be created
3012 for the scalar stmts in each node of the SLP tree. The number of vector
3013 elements in one vector iteration is the number of scalar elements in
3014 one scalar iteration (GROUP_SIZE) multiplied by VF and divided by the
3015 vector size. */
3016 vec_stmts_size = (vectorization_factor * group_size) / nunits;
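/* Worked example with hypothetical numbers: for vectorization_factor == 4,
   group_size == 2 and nunits == 4, vec_stmts_size == (4 * 2) / 4 == 2,
   i.e. two vector stmts are created for this node's scalar stmts.  */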
3018 if (!SLP_TREE_VEC_STMTS (node).exists ())
3020 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3021 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3024 if (dump_enabled_p ())
3026 dump_printf_loc (MSG_NOTE,vect_location,
3027 "------>vectorizing SLP node starting from: ");
3028 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3029 dump_printf (MSG_NOTE, "\n");
3032 /* Loads should be inserted before the first load. */
3033 if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
3034 && STMT_VINFO_GROUPED_ACCESS (stmt_info)
3035 && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
3036 && SLP_TREE_LOAD_PERMUTATION (node).exists ())
3037 si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
3038 else if (is_pattern_stmt_p (stmt_info))
3039 si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
3040 else
3041 si = gsi_for_stmt (stmt);
3043 /* Stores should be inserted just before the last store. */
3044 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
3045 && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
3047 gimple last_store = vect_find_last_store_in_slp_instance (instance);
3048 if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
3049 last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
3050 si = gsi_for_stmt (last_store);
3053 /* Mark the first element of the reduction chain as a reduction to properly
3054 transform the node. In the analysis phase only the last element of the
3055 chain is marked as a reduction. */
3056 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3057 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3059 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3060 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3063 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3064 return is_store;
3067 /* Replace the scalar calls in SLP node NODE by assignments setting their
3068 lhs to zero. For loop vectorization this is done in vectorizable_call,
3069 but for SLP it needs to be deferred until the end of vect_schedule_slp,
3070 because multiple SLP instances may refer to the same scalar stmt. */
3072 static void
3073 vect_remove_slp_scalar_calls (slp_tree node)
3075 gimple stmt, new_stmt;
3076 gimple_stmt_iterator gsi;
3077 int i;
3078 slp_tree child;
3079 tree lhs;
3080 stmt_vec_info stmt_info;
3082 if (!node)
3083 return;
3085 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3086 vect_remove_slp_scalar_calls (child);
3088 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3090 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3091 continue;
3092 stmt_info = vinfo_for_stmt (stmt);
3093 if (stmt_info == NULL
3094 || is_pattern_stmt_p (stmt_info)
3095 || !PURE_SLP_STMT (stmt_info))
3096 continue;
3097 lhs = gimple_call_lhs (stmt);
3098 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3099 set_vinfo_for_stmt (new_stmt, stmt_info);
3100 set_vinfo_for_stmt (stmt, NULL);
3101 STMT_VINFO_STMT (stmt_info) = new_stmt;
3102 gsi = gsi_for_stmt (stmt);
3103 gsi_replace (&gsi, new_stmt, false);
3104 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
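/* Before/after sketch, in hypothetical GIMPLE, of the replacement performed
   above:

     x_1 = foo (a_2);    becomes    x_1 = 0;

   The stmt_vec_info is carried over to the new assignment so that other SLP
   instances still referring to this scalar stmt see the updated
   definition.  */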
3108 /* Generate vector code for all SLP instances in the loop/basic block. */
3110 bool
3111 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3113 vec<slp_instance> slp_instances;
3114 slp_instance instance;
3115 unsigned int i, vf;
3116 bool is_store = false;
3118 if (loop_vinfo)
3120 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3121 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3123 else
3125 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3126 vf = 1;
3129 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3131 /* Schedule the tree of INSTANCE. */
3132 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3133 instance, vf);
3134 if (dump_enabled_p ())
3135 dump_printf_loc (MSG_NOTE, vect_location,
3136 "vectorizing stmts using SLP.\n");
3139 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3141 slp_tree root = SLP_INSTANCE_TREE (instance);
3142 gimple store;
3143 unsigned int j;
3144 gimple_stmt_iterator gsi;
3146 /* Remove scalar call stmts. Do not do this for basic-block
3147 vectorization as not all uses may be vectorized.
3148 ??? Why should this be necessary? DCE should be able to
3149 remove the stmts itself.
3150 ??? For BB vectorization we can as well remove scalar
3151 stmts starting from the SLP tree root if they have no
3152 uses. */
3153 if (loop_vinfo)
3154 vect_remove_slp_scalar_calls (root);
3156 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3157 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3159 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3160 break;
3162 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3163 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3164 /* Free the attached stmt_vec_info and remove the stmt. */
3165 gsi = gsi_for_stmt (store);
3166 unlink_stmt_vdef (store);
3167 gsi_remove (&gsi, true);
3168 release_defs (store);
3169 free_stmt_vec_info (store);
3173 return is_store;
3177 /* Vectorize the basic block. */
3179 void
3180 vect_slp_transform_bb (basic_block bb)
3182 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3183 gimple_stmt_iterator si;
3185 gcc_assert (bb_vinfo);
3187 if (dump_enabled_p ())
3188 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3190 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3192 gimple stmt = gsi_stmt (si);
3193 stmt_vec_info stmt_info;
3195 if (dump_enabled_p ())
3197 dump_printf_loc (MSG_NOTE, vect_location,
3198 "------>SLPing statement: ");
3199 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3200 dump_printf (MSG_NOTE, "\n");
3203 stmt_info = vinfo_for_stmt (stmt);
3204 gcc_assert (stmt_info);
3206 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3207 if (STMT_SLP_TYPE (stmt_info))
3209 vect_schedule_slp (NULL, bb_vinfo);
3210 break;
3214 if (dump_enabled_p ())
3215 dump_printf_loc (MSG_NOTE, vect_location,
3216 "BASIC BLOCK VECTORIZED\n");
3218 destroy_bb_vec_info (bb_vinfo);