gcc/tree-vect-slp.c

/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2013 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/* Extract the location of the basic block in the source code.
   Return the basic block location if one is found, and UNKNOWN_LOC
   otherwise.  */

LOC
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOC;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}

/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}

/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}

/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;

  return node;
}

/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}

/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}

/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      result++;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
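
/* As an example of the place computation: for an interleaving chain built
   over the accesses a[0], a[1], a[2], a[3], with FIRST_STMT being the load
   of a[0], the load of a[2] is at place 2, while a stmt belonging to a
   chain with a different first element gets -1.  */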

/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  */

static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int op_idx = 1;
  tree compare_rhs = NULL_TREE;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        number_of_oprnds++;
    }
  else
    return false;

  for (i = 0; i < number_of_oprnds; i++)
    {
      if (compare_rhs)
        {
          oprnd = compare_rhs;
          compare_rhs = NULL_TREE;
        }
      else
        oprnd = gimple_op (stmt, op_idx++);

      oprnd_info = (*oprnds_info)[i];

      if (COMPARISON_CLASS_P (oprnd))
        {
          compare_rhs = TREE_OPERAND (oprnd, 1);
          oprnd = TREE_OPERAND (oprnd, 0);
        }

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return false;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return false;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return false;
            }

          switch (gimple_code (def_stmt))
            {
              case GIMPLE_PHI:
                def = gimple_phi_result (def_stmt);
                break;

              case GIMPLE_ASSIGN:
                def = gimple_assign_lhs (def_stmt);
                break;

              default:
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported defining stmt:\n");
                return false;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return false;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return false;
        }
    }

  return true;
}

/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are not of unsupported types of operation.  Return
   true if so; otherwise return false and indicate in *MATCHES which
   stmts are not isomorphic to the first one.  If MATCHES[0] is false
   then this indicates that the comparison could not be carried out or
   that the stmts will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches)
{
  unsigned int i;
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (is_gimple_call (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (stmt)
              || gimple_call_tail_p (stmt)
              || gimple_call_noreturn_p (stmt)
              || !gimple_call_nothrow_p (stmt)
              || gimple_call_chain (stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ???  We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                   && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  return true;
}

/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise,
   return TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes)
{
  unsigned nops, i, this_npermutes = 0;
  gimple stmt;

  if (!matches)
    matches = XALLOCAVEC (bool, group_size);
  if (!npermutes)
    npermutes = &this_npermutes;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches))
    return false;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                        stmt, (i == 0), &oprnds_info))
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }
    }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      bool *matches = XALLOCAVEC (bool, group_size);
      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches, npermutes))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commutated try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          /* Do so only if the number of unsuccessful permutes was not
             more than a cut-off, as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          /* Swap mismatched definition stmts.  */
          for (unsigned j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
              }
          /* And try again ... */
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   matches, npermutes))
            {
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  vect_free_oprnd_info (oprnds_info);
  return true;
}
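
/* A sketch of the operand-swap retry above: for the group
   { x0 = a0 + b0;  x1 = b1 + a1; } the build of the operand-zero child
   can fail to match { a0, b1 }.  Since PLUS_EXPR is commutative, the
   def stmts of the mismatched lane are swapped between the two operand
   vectors (giving { a0, a1 } and { b0, b1 }) and the build is retried,
   with *NPERMUTES bounding how often this is attempted.  */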

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}

/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}

/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}

/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
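
/* Note that the assignment above scatters rather than gathers: the stmt
   at index I moves to index PERMUTATION[I].  For example, for the group
   {s0, s1, s2} and PERMUTATION = {1, 2, 0} the rearranged order is
   {s2, s0, s1}, not {s1, s2, s0}.  */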

/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (i = 0; i < group_size; ++i)
            dump_printf (MSG_NOTE, "%d ", i);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this? */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check that for every node in the instance the loads
         form a subchain.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                return false;
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ???  This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
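  /* For example, with GROUP_SIZE == 2 a supported permutation consists of
     one load node reading {dr0, dr0} and another reading {dr1, dr1}: the
     permutations are {0, 0} and {1, 1}, each node repeating a single
     distinct index and the first indices together covering 0 and 1.  */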
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}

/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}

/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                             instance, child, prologue_cost_vec,
                             ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ???  Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                      stmt_info, 0, vect_body);

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt)
          && (dt == vect_constant_def || dt == vect_external_def))
        record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                          stmt_info, 0, vect_prologue);
    }
}

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
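
  /* For example, NUNITS == 4 and GROUP_SIZE == 6 give
     least_common_multiple (4, 6) == 12 and thus NCOPIES_FOR_COST
     == 12 / 4 == 3 vector stmts, while NUNITS == 4 and GROUP_SIZE == 2
     give 4 / 4 == 1.  */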

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
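
  /* For example, NUNITS == 4 and GROUP_SIZE == 2 give
     least_common_multiple (4, 2) == 4 and an unrolling factor of
     4 / 2 == 2: two copies of the group are needed to fill one vector.
     GROUP_SIZE == 8 with NUNITS == 4 gives 8 / 8 == 1.  */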
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, NULL, NULL))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }

      /* Compute the costs of this SLP instance.  */
      vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                             new_instance, TYPE_VECTOR_SUBPARTS (vectype));

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}

/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.\n");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0]))
    ok = true;

  return true;
}

/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}

/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (node);
  gimple stmt = stmts[0];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  slp_tree child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = NULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  basic_block bb = NULL;

  if (!node)
    return;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
  else
    bb = BB_VINFO_BB (bb_vinfo);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
        && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
        if (gimple_bb (use_stmt)
            && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                || bb == gimple_bb (use_stmt))
            && (stmt_vinfo = vinfo_for_stmt (use_stmt))
            && !STMT_SLP_TYPE (stmt_vinfo)
            && (STMT_VINFO_RELEVANT (stmt_vinfo)
                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo)))
            && !(gimple_code (use_stmt) == GIMPLE_PHI
                 && STMT_VINFO_DEF_TYPE (stmt_vinfo)
                    == vect_reduction_def))
          vect_mark_slp_stmts (node, hybrid, i);

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_detect_hybrid_slp_stmts (child);
}

/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}

/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}

/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  vect_destroy_datarefs (NULL, bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}

/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (bb_vinfo, child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}

/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);
        }
      else
        i++;
    }

  if (!slp_instances.length ())
    return false;

  return true;
}

/* Compute the scalar cost of the SLP node NODE and its children
   and return it.  Do not account defs that are marked in LIFE and
   update LIFE according to uses of NODE.  */

static unsigned
vect_bb_slp_scalar_cost (basic_block bb,
                         slp_tree node, vec<bool, va_stack> life)
{
  unsigned scalar_cost = 0;
  unsigned i;
  gimple stmt;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      unsigned stmt_cost;
      ssa_op_iter op_iter;
      def_operand_p def_p;
      stmt_vec_info stmt_info;

      if (life[i])
        continue;

      /* If there is a non-vectorized use of the defs then the scalar
         stmt is kept live in which case we do not account it or any
         required defs in the SLP children in the scalar cost.  This
         way we make the vectorization more costly when compared to
         the scalar cost.  */
      FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
        {
          imm_use_iterator use_iter;
          gimple use_stmt;
          FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
            if (gimple_code (use_stmt) == GIMPLE_PHI
                || gimple_bb (use_stmt) != bb
                || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt)))
              {
                life[i] = true;
                BREAK_FROM_IMM_USE_STMT (use_iter);
              }
        }
      if (life[i])
        continue;

      stmt_info = vinfo_for_stmt (stmt);
      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = vect_get_stmt_cost (scalar_load);
          else
            stmt_cost = vect_get_stmt_cost (scalar_store);
        }
      else
        stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);

  return scalar_cost;
}
1995 /* Check if vectorization of the basic block is profitable. */
1997 static bool
1998 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2000 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2001 slp_instance instance;
2002 int i, j;
2003 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2004 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2005 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2006 stmt_vec_info stmt_info = NULL;
2007 stmt_vector_for_cost body_cost_vec;
2008 stmt_info_for_cost *ci;
2010 /* Calculate vector costs. */
2011 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2013 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2015 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2017 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2018 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2019 stmt_info, ci->misalign, vect_body);
2023 /* Calculate scalar cost. */
2024 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2026 vec<bool, va_stack> life;
2027 vec_stack_alloc (bool, life, SLP_INSTANCE_GROUP_SIZE (instance));
2028 life.quick_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2029 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2030 SLP_INSTANCE_TREE (instance),
2031 life);
2032 life.release ();
2035 /* Complete the target-specific cost calculation. */
2036 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2037 &vec_inside_cost, &vec_epilogue_cost);
2039 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2041 if (dump_enabled_p ())
2043 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2044 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2045 vec_inside_cost);
2046 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2047 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2048 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2051 /* Vectorization is profitable if its cost is less than the cost of
2052 the scalar version. */
2053 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2054 return false;
2056 return true;
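/* A one-line restatement of the decision above as a standalone sketch
   (illustrative, not GCC code), with made-up numbers in the comment:
   vectorization must be strictly cheaper than the scalar code.  */
static int
profitable_p (unsigned vec_inside, unsigned vec_prologue,
              unsigned vec_epilogue, unsigned scalar)
{
  /* E.g. with vec_inside == 6, prologue + epilogue == 2 and scalar == 8,
     6 + 2 >= 8 holds and the basic block is rejected.  */
  return vec_prologue + vec_epilogue + vec_inside < scalar;
}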
2059 /* Check if the basic block can be vectorized. */
2061 static bb_vec_info
2062 vect_slp_analyze_bb_1 (basic_block bb)
2064 bb_vec_info bb_vinfo;
2065 vec<slp_instance> slp_instances;
2066 slp_instance instance;
2067 int i;
2068 int min_vf = 2;
2070 bb_vinfo = new_bb_vec_info (bb);
2071 if (!bb_vinfo)
2072 return NULL;
2074 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
2076 if (dump_enabled_p ())
2077 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2078 "not vectorized: unhandled data-ref in basic "
2079 "block.\n");
2081 destroy_bb_vec_info (bb_vinfo);
2082 return NULL;
2085 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2087 if (dump_enabled_p ())
2088 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2089 "not vectorized: not enough data-refs in "
2090 "basic block.\n");
2092 destroy_bb_vec_info (bb_vinfo);
2093 return NULL;
2096 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2098 if (dump_enabled_p ())
2099 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2100 "not vectorized: unhandled data access in "
2101 "basic block.\n");
2103 destroy_bb_vec_info (bb_vinfo);
2104 return NULL;
2107 vect_pattern_recog (NULL, bb_vinfo);
2109 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2111 if (dump_enabled_p ())
2112 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2113 "not vectorized: unhandled data dependence "
2114 "in basic block.\n");
2116 destroy_bb_vec_info (bb_vinfo);
2117 return NULL;
2120 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2122 if (dump_enabled_p ())
2123 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2124 "not vectorized: bad data alignment in basic "
2125 "block.\n");
2127 destroy_bb_vec_info (bb_vinfo);
2128 return NULL;
2131 /* Check for SLP opportunities in the basic block; analyze and build SLP
2132 trees. */
2133 if (!vect_analyze_slp (NULL, bb_vinfo))
2135 if (dump_enabled_p ())
2136 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2137 "not vectorized: failed to find SLP opportunities "
2138 "in basic block.\n");
2140 destroy_bb_vec_info (bb_vinfo);
2141 return NULL;
2144 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2146 /* Mark all the statements that we want to vectorize as pure SLP and
2147 relevant. */
2148 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2150 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2151 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2154 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2156 if (dump_enabled_p ())
2157 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2158 "not vectorized: unsupported alignment in basic "
2159 "block.\n");
2160 destroy_bb_vec_info (bb_vinfo);
2161 return NULL;
2164 if (!vect_slp_analyze_operations (bb_vinfo))
2166 if (dump_enabled_p ())
2167 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2168 "not vectorized: bad operation in basic block.\n");
2170 destroy_bb_vec_info (bb_vinfo);
2171 return NULL;
2174 /* Cost model: check if the vectorization is worthwhile. */
2175 if (!unlimited_cost_model ()
2176 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2178 if (dump_enabled_p ())
2179 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2180 "not vectorized: vectorization is not "
2181 "profitable.\n");
2183 destroy_bb_vec_info (bb_vinfo);
2184 return NULL;
2187 if (dump_enabled_p ())
2188 dump_printf_loc (MSG_NOTE, vect_location,
2189 "Basic block will be vectorized using SLP\n");
2191 return bb_vinfo;
2195 bb_vec_info
2196 vect_slp_analyze_bb (basic_block bb)
2198 bb_vec_info bb_vinfo;
2199 int insns = 0;
2200 gimple_stmt_iterator gsi;
2201 unsigned int vector_sizes;
2203 if (dump_enabled_p ())
2204 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2206 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2208 gimple stmt = gsi_stmt (gsi);
2209 if (!is_gimple_debug (stmt)
2210 && !gimple_nop_p (stmt)
2211 && gimple_code (stmt) != GIMPLE_LABEL)
2212 insns++;
2215 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2217 if (dump_enabled_p ())
2218 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2219 "not vectorized: too many instructions in "
2220 "basic block.\n");
2222 return NULL;
2225 /* Autodetect the first vector size we try. */
2226 current_vector_size = 0;
2227 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2229 while (1)
2231 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2232 if (bb_vinfo)
2233 return bb_vinfo;
2235 destroy_bb_vec_info (bb_vinfo);
2237 vector_sizes &= ~current_vector_size;
2238 if (vector_sizes == 0
2239 || current_vector_size == 0)
2240 return NULL;
2242 /* Try the largest remaining vector size. */
2243 current_vector_size = 1 << floor_log2 (vector_sizes);
2244 if (dump_enabled_p ())
2245 dump_printf_loc (MSG_NOTE, vect_location,
2246 "***** Re-trying analysis with "
2247 "vector size %d\n", current_vector_size);
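/* A standalone sketch (illustrative, not GCC code) of the retry loop
   above: the target reports its supported vector sizes as a bitmask of
   powers of two, and each failed analysis clears the size just tried and
   retries with the largest size still set.  my_floor_log2 and
   next_vector_size are invented stand-ins for the real helpers.  */
static int
my_floor_log2 (unsigned x)
{
  int l = -1;
  while (x)
    {
      x >>= 1;
      l++;
    }
  return l;
}

static unsigned
next_vector_size (unsigned sizes, unsigned current)
{
  sizes &= ~current;                    /* Drop the size that just failed.  */
  if (sizes == 0)
    return 0;                           /* Nothing left to try.  */
  return 1u << my_floor_log2 (sizes);   /* Largest remaining size.  */
}

/* E.g. with sizes == 16 | 8 == 24, after size 16 fails
   next_vector_size (24, 16) returns 8.  */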
2252 /* SLP costs are calculated according to the SLP instance unrolling factor
2253 (i.e., the number of created vector stmts depends on the unrolling factor).
2254 However, the actual number of vector stmts for every SLP node depends on
2255 VF, which is set later in vect_analyze_operations (). Hence, the SLP costs
2256 should be updated. In this function we assume that the inside costs
2257 calculated in vect_model_xxx_cost are linear in ncopies. */
2259 void
2260 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2262 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2263 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2264 slp_instance instance;
2265 stmt_vector_for_cost body_cost_vec;
2266 stmt_info_for_cost *si;
2267 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2269 if (dump_enabled_p ())
2270 dump_printf_loc (MSG_NOTE, vect_location,
2271 "=== vect_update_slp_costs_according_to_vf ===\n");
2273 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2275 /* We assume that costs are linear in ncopies. */
2276 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2278 /* Record the instance's instructions in the target cost model.
2279 This was delayed until here because the count of instructions
2280 isn't known beforehand. */
2281 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2283 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2284 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2285 vinfo_for_stmt (si->stmt), si->misalign,
2286 vect_body);
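/* A worked example for the update above, as a standalone sketch
   (illustrative, not GCC code; the numbers are made up): with VF == 8
   and an instance unrolling factor of 2, ncopies == 4, so a stmt
   recorded with count 3 contributes 3 * 4 == 12 copies to the target
   cost model.  scaled_count is an invented name.  */
static unsigned
scaled_count (unsigned count, unsigned vf, unsigned unrolling_factor)
{
  unsigned ncopies = vf / unrolling_factor;  /* Assumed to divide evenly.  */
  return count * ncopies;                    /* Costs linear in ncopies.  */
}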
2291 /* For constant and loop invariant defs of SLP_NODE this function returns
2292 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2293 OP_NUM determines whether we gather defs for operand 0 or operand 1 of the
2294 RHS of the scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to
2295 create. REDUC_INDEX is the index of the reduction operand in the statements,
2296 unless it is -1. */
2298 static void
2299 vect_get_constant_vectors (tree op, slp_tree slp_node,
2300 vec<tree> *vec_oprnds,
2301 unsigned int op_num, unsigned int number_of_vectors,
2302 int reduc_index)
2304 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2305 gimple stmt = stmts[0];
2306 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2307 unsigned nunits;
2308 tree vec_cst;
2309 tree *elts;
2310 unsigned j, number_of_places_left_in_vector;
2311 tree vector_type;
2312 tree vop;
2313 int group_size = stmts.length ();
2314 unsigned int vec_num, i;
2315 unsigned number_of_copies = 1;
2316 vec<tree> voprnds;
2317 voprnds.create (number_of_vectors);
2318 bool constant_p, is_store;
2319 tree neutral_op = NULL;
2320 enum tree_code code = gimple_expr_code (stmt);
2321 gimple def_stmt;
2322 struct loop *loop;
2323 gimple_seq ctor_seq = NULL;
2325 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2326 && reduc_index != -1)
2328 op_num = reduc_index - 1;
2329 op = gimple_op (stmt, reduc_index);
2330 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2331 we need either neutral operands or the original operands. See
2332 get_initial_def_for_reduction() for details. */
2333 switch (code)
2335 case WIDEN_SUM_EXPR:
2336 case DOT_PROD_EXPR:
2337 case PLUS_EXPR:
2338 case MINUS_EXPR:
2339 case BIT_IOR_EXPR:
2340 case BIT_XOR_EXPR:
2341 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2342 neutral_op = build_real (TREE_TYPE (op), dconst0);
2343 else
2344 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2346 break;
2348 case MULT_EXPR:
2349 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2350 neutral_op = build_real (TREE_TYPE (op), dconst1);
2351 else
2352 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2354 break;
2356 case BIT_AND_EXPR:
2357 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2358 break;
2360 case MAX_EXPR:
2361 case MIN_EXPR:
2362 def_stmt = SSA_NAME_DEF_STMT (op);
2363 loop = (gimple_bb (stmt))->loop_father;
2364 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2365 loop_preheader_edge (loop));
2366 break;
2368 default:
2369 neutral_op = NULL;
2373 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2375 is_store = true;
2376 op = gimple_assign_rhs1 (stmt);
2378 else
2379 is_store = false;
2381 gcc_assert (op);
2383 if (CONSTANT_CLASS_P (op))
2384 constant_p = true;
2385 else
2386 constant_p = false;
2388 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2389 gcc_assert (vector_type);
2390 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2392 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2393 created vectors. It is greater than 1 if unrolling is performed.
2395 For example, we have two scalar operands, s1 and s2 (e.g., group of
2396 strided accesses of size two), while NUNITS is four (i.e., four scalars
2397 of this type can be packed in a vector). The output vector will contain
2398 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2399 will be 2).
2401 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2402 containing the operands.
2404 For example, NUNITS is four as before, and the group size is 8
2405 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2406 {s5, s6, s7, s8}. */
2408 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
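/* Worked example (illustrative): with NUNITS == 4 and GROUP_SIZE == 6,
   least_common_multiple (4, 6) == 12, so NUMBER_OF_COPIES == 12 / 6 == 2.
   The six scalars then fill three vectors as {s1,s2,s3,s4} {s5,s6,s1,s2}
   {s3,s4,s5,s6}, each scalar appearing exactly twice.  */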
2410 number_of_places_left_in_vector = nunits;
2411 elts = XALLOCAVEC (tree, nunits);
2412 for (j = 0; j < number_of_copies; j++)
2414 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2416 if (is_store)
2417 op = gimple_assign_rhs1 (stmt);
2418 else
2420 switch (code)
2422 case COND_EXPR:
2423 if (op_num == 0 || op_num == 1)
2425 tree cond = gimple_assign_rhs1 (stmt);
2426 op = TREE_OPERAND (cond, op_num);
2428 else
2430 if (op_num == 2)
2431 op = gimple_assign_rhs2 (stmt);
2432 else
2433 op = gimple_assign_rhs3 (stmt);
2435 break;
2437 case CALL_EXPR:
2438 op = gimple_call_arg (stmt, op_num);
2439 break;
2441 case LSHIFT_EXPR:
2442 case RSHIFT_EXPR:
2443 case LROTATE_EXPR:
2444 case RROTATE_EXPR:
2445 op = gimple_op (stmt, op_num + 1);
2446 /* Unlike the other binary operators, shifts/rotates take
2447 a shift count of type int rather than of the same type as
2448 the lhs, so make sure the scalar has the right type if
2449 we are dealing with vectors of
2450 long long/long/short/char. */
2451 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2452 op = fold_convert (TREE_TYPE (vector_type), op);
2453 break;
2455 default:
2456 op = gimple_op (stmt, op_num + 1);
2457 break;
2461 if (reduc_index != -1)
2463 loop = (gimple_bb (stmt))->loop_father;
2464 def_stmt = SSA_NAME_DEF_STMT (op);
2466 gcc_assert (loop);
2468 /* Get the def before the loop. In a reduction chain we have only
2469 one initial value. */
2470 if ((j != (number_of_copies - 1)
2471 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2472 && i != 0))
2473 && neutral_op)
2474 op = neutral_op;
2475 else
2476 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2477 loop_preheader_edge (loop));
2480 /* Create 'vect_ = {op0,op1,...,opn}'. */
2481 number_of_places_left_in_vector--;
2482 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2484 if (CONSTANT_CLASS_P (op))
2486 op = fold_unary (VIEW_CONVERT_EXPR,
2487 TREE_TYPE (vector_type), op);
2488 gcc_assert (op && CONSTANT_CLASS_P (op));
2490 else
2492 tree new_temp
2493 = make_ssa_name (TREE_TYPE (vector_type), NULL);
2494 gimple init_stmt;
2495 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
2496 op);
2497 init_stmt
2498 = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR,
2499 new_temp, op, NULL_TREE);
2500 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2501 op = new_temp;
2504 elts[number_of_places_left_in_vector] = op;
2505 if (!CONSTANT_CLASS_P (op))
2506 constant_p = false;
2508 if (number_of_places_left_in_vector == 0)
2510 number_of_places_left_in_vector = nunits;
2512 if (constant_p)
2513 vec_cst = build_vector (vector_type, elts);
2514 else
2516 vec<constructor_elt, va_gc> *v;
2517 unsigned k;
2518 vec_alloc (v, nunits);
2519 for (k = 0; k < nunits; ++k)
2520 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2521 vec_cst = build_constructor (vector_type, v);
2523 voprnds.quick_push (vect_init_vector (stmt, vec_cst,
2524 vector_type, NULL));
2525 if (ctor_seq != NULL)
2527 gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
2528 gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
2529 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2530 GSI_SAME_STMT);
2531 ctor_seq = NULL;
2537 /* Since the vectors are created in reverse order, we should invert
2538 them. */
2539 vec_num = voprnds.length ();
2540 for (j = vec_num; j != 0; j--)
2542 vop = voprnds[j - 1];
2543 vec_oprnds->quick_push (vop);
2546 voprnds.release ();
2548 /* If VF is greater than the unrolling factor needed for the SLP
2549 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
2550 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2551 to replicate the vectors. */
2552 while (number_of_vectors > vec_oprnds->length ())
2554 tree neutral_vec = NULL;
2556 if (neutral_op)
2558 if (!neutral_vec)
2559 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2561 vec_oprnds->quick_push (neutral_vec);
2563 else
2565 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2566 vec_oprnds->quick_push (vop);
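/* A standalone sketch (illustrative, not GCC code) of the replication
   step above: when more vector defs are needed than were built, either a
   neutral vector is pushed (for reductions) or the existing defs are
   reused in order.  Plain ints stand in for the tree operands; the
   caller is assumed to provide room for NEEDED entries, and BUILT is
   assumed non-zero when no neutral value exists.  */
static void
replicate_defs (int *oprnds, unsigned *len, unsigned needed,
                int have_neutral, int neutral)
{
  unsigned built = *len;        /* Number of defs created so far.  */
  while (*len < needed)
    {
      if (have_neutral)
        oprnds[(*len)++] = neutral;
      else
        for (unsigned i = 0; i < built && *len < needed; i++)
          oprnds[(*len)++] = oprnds[i];  /* Reuse in the original order.  */
    }
}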
2572 /* Get vectorized definitions from SLP_NODE that contains the corresponding
2573 vectorized def-stmts. */
2575 static void
2576 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2578 tree vec_oprnd;
2579 gimple vec_def_stmt;
2580 unsigned int i;
2582 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2584 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2586 gcc_assert (vec_def_stmt);
2587 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2588 vec_oprnds->quick_push (vec_oprnd);
2593 /* Get vectorized definitions for SLP_NODE.
2594 If the scalar definitions are loop invariants or constants, collect them and
2595 call vect_get_constant_vectors() to create vector stmts.
2596 Otherwise, the def-stmts must already be vectorized and the vectorized stmts
2597 must be stored in the corresponding child of SLP_NODE, and we call
2598 vect_get_slp_vect_defs () to retrieve them. */
2600 void
2601 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2602 vec<vec<tree> > *vec_oprnds, int reduc_index)
2604 gimple first_stmt;
2605 int number_of_vects = 0, i;
2606 unsigned int child_index = 0;
2607 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2608 slp_tree child = NULL;
2609 vec<tree> vec_defs;
2610 tree oprnd;
2611 bool vectorized_defs;
2613 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2614 FOR_EACH_VEC_ELT (ops, i, oprnd)
2616 /* For each operand we check if it has vectorized definitions in a child
2617 node or whether we need to create them (for invariants and constants). We
2618 check if the LHS of the first stmt of the next child matches OPRND.
2619 If it does, we have found the correct child. Otherwise, we call
2620 vect_get_constant_vectors () and do not advance CHILD_INDEX, in order
2621 to check this child node for the next operand. */
2622 vectorized_defs = false;
2623 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2625 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2627 /* We have to check both pattern and original def, if available. */
2628 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2629 gimple related = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2631 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2632 || (related
2633 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2635 /* The number of vector defs is determined by the number of
2636 vector statements in the node from which we get those
2637 statements. */
2638 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2639 vectorized_defs = true;
2640 child_index++;
2644 if (!vectorized_defs)
2646 if (i == 0)
2648 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
2649 /* The number of vector stmts was calculated according to LHS in
2650 vect_schedule_slp_instance (); fix it by replacing LHS with
2651 RHS, if necessary. See vect_get_smallest_scalar_type () for
2652 details. */
2653 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
2654 &rhs_size_unit);
2655 if (rhs_size_unit != lhs_size_unit)
2657 number_of_vects *= rhs_size_unit;
2658 number_of_vects /= lhs_size_unit;
2663 /* Allocate memory for vectorized defs. */
2664 vec_defs = vNULL;
2665 vec_defs.create (number_of_vects);
2667 /* For reduction defs we call vect_get_constant_vectors (), since we are
2668 looking for initial loop invariant values. */
2669 if (vectorized_defs && reduc_index == -1)
2670 /* The defs are already vectorized. */
2671 vect_get_slp_vect_defs (child, &vec_defs);
2672 else
2673 /* Build vectors from scalar defs. */
2674 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
2675 number_of_vects, reduc_index);
2677 vec_oprnds->quick_push (vec_defs);
2679 /* For reductions, we only need initial values. */
2680 if (reduc_index != -1)
2681 return;
2686 /* Create NCOPIES permutation statements using the vector mask MASK (a
2687 vector constant of the mask type) and two input vectors placed in
2688 DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy, and
2689 shifting by STRIDE elements of DR_CHAIN for every copy.
2690 (STRIDE is the number of vectorized stmts for NODE divided by the number of
2691 copies.)
2692 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE where
2693 the created stmts must be stored. */
2695 static inline void
2696 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
2697 tree mask, int first_vec_indx, int second_vec_indx,
2698 gimple_stmt_iterator *gsi, slp_tree node,
2699 tree vectype, vec<tree> dr_chain,
2700 int ncopies, int vect_stmts_counter)
2702 tree perm_dest;
2703 gimple perm_stmt = NULL;
2704 stmt_vec_info next_stmt_info;
2705 int i, stride;
2706 tree first_vec, second_vec, data_ref;
2708 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
2710 /* Initialize the vect stmts of NODE to properly insert the generated
2711 stmts later. */
2712 for (i = SLP_TREE_VEC_STMTS (node).length ();
2713 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
2714 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
2716 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
2717 for (i = 0; i < ncopies; i++)
2719 first_vec = dr_chain[first_vec_indx];
2720 second_vec = dr_chain[second_vec_indx];
2722 /* Generate the permute statement. */
2723 perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
2724 first_vec, second_vec, mask);
2725 data_ref = make_ssa_name (perm_dest, perm_stmt);
2726 gimple_set_lhs (perm_stmt, data_ref);
2727 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
2729 /* Store the vector statement in NODE. */
2730 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
2732 first_vec_indx += stride;
2733 second_vec_indx += stride;
2736 /* Mark the scalar stmt as vectorized. */
2737 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
2738 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
2742 /* Given FIRST_MASK_ELEMENT - the mask element in element representation -
2743 return in CURRENT_MASK_ELEMENT its equivalent in target-specific
2744 representation. Check that the mask is valid and return FALSE if not.
2745 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
2746 the next vector, i.e., the current first vector is not needed. */
2748 static bool
2749 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
2750 int mask_nunits, bool only_one_vec, int index,
2751 unsigned char *mask, int *current_mask_element,
2752 bool *need_next_vector, int *number_of_mask_fixes,
2753 bool *mask_fixed, bool *needs_first_vector)
2755 int i;
2757 /* Convert to target specific representation. */
2758 *current_mask_element = first_mask_element + m;
2759 /* Adjust the value in case it's a mask for second and third vectors. */
2760 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
2762 if (*current_mask_element < mask_nunits)
2763 *needs_first_vector = true;
2765 /* We have only one input vector to permute but the mask accesses values in
2766 the next vector as well. */
2767 if (only_one_vec && *current_mask_element >= mask_nunits)
2769 if (dump_enabled_p ())
2771 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2772 "permutation requires at least two vectors ");
2773 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2774 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2777 return false;
2780 /* The mask requires the next vector. */
2781 if (*current_mask_element >= mask_nunits * 2)
2783 if (*needs_first_vector || *mask_fixed)
2785 /* We either need the first vector too or have already moved to the
2786 next vector. In both cases, this permutation needs three
2787 vectors. */
2788 if (dump_enabled_p ())
2790 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2791 "permutation requires at "
2792 "least three vectors ");
2793 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2794 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2797 return false;
2800 /* We move to the next vector, dropping the first one and working with
2801 the second and the third - we need to adjust the values of the mask
2802 accordingly. */
2803 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
2805 for (i = 0; i < index; i++)
2806 mask[i] -= mask_nunits * *number_of_mask_fixes;
2808 (*number_of_mask_fixes)++;
2809 *mask_fixed = true;
2812 *need_next_vector = *mask_fixed;
2814 /* This was the last element of this mask. Start a new one. */
2815 if (index == mask_nunits - 1)
2817 *number_of_mask_fixes = 1;
2818 *mask_fixed = false;
2819 *needs_first_vector = false;
2822 return true;
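/* Worked example (illustrative) of the fix-up above, with NUNITS == 4,
   for the illegal third mask {6,9,9,9} discussed in
   vect_transform_slp_perm_load below: element 6 is emitted as-is; the
   first 9 is >= 2 * NUNITS, so the first vector is dropped, the
   already-emitted 6 is rewritten to 6 - 4 == 2 and NUMBER_OF_MASK_FIXES
   is bumped; the remaining 9s then map to 9 - 4 == 5, giving {2,5,5,5}
   over the second and third input vectors.  */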
2826 /* Generate vector permute statements from a list of loads in DR_CHAIN.
2827 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
2828 permute statements for the SLP node NODE of the SLP instance
2829 SLP_NODE_INSTANCE. */
2831 bool
2832 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
2833 gimple_stmt_iterator *gsi, int vf,
2834 slp_instance slp_node_instance, bool analyze_only)
2836 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
2837 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2838 tree mask_element_type = NULL_TREE, mask_type;
2839 int i, j, k, nunits, vec_index = 0, scalar_index;
2840 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2841 gimple next_scalar_stmt;
2842 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
2843 int first_mask_element;
2844 int index, unroll_factor, current_mask_element, ncopies;
2845 unsigned char *mask;
2846 bool only_one_vec = false, need_next_vector = false;
2847 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
2848 int number_of_mask_fixes = 1;
2849 bool mask_fixed = false;
2850 bool needs_first_vector = false;
2851 enum machine_mode mode;
2853 mode = TYPE_MODE (vectype);
2855 if (!can_vec_perm_p (mode, false, NULL))
2857 if (dump_enabled_p ())
2859 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2860 "no vect permute for ");
2861 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
2862 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
2864 return false;
2867 /* The generic VEC_PERM_EXPR code always uses an integral type of the
2868 same size as the vector element being permuted. */
2869 mask_element_type = lang_hooks.types.type_for_mode
2870 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
2871 mask_type = get_vectype_for_scalar_type (mask_element_type);
2872 nunits = TYPE_VECTOR_SUBPARTS (vectype);
2873 mask = XALLOCAVEC (unsigned char, nunits);
2874 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2876 /* The number of vector stmts to generate, based only on the
2877 SLP_NODE_INSTANCE unrolling factor. */
2878 orig_vec_stmts_num = group_size *
2879 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
2880 if (orig_vec_stmts_num == 1)
2881 only_one_vec = true;
2883 /* The number of copies is determined by the final vectorization factor
2884 relative to the SLP_NODE_INSTANCE unrolling factor. */
2885 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
2887 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
2888 return false;
2890 /* Generate permutation masks for every NODE. The number of masks for each
2891 NODE is equal to GROUP_SIZE.
2892 E.g., we have a group of three nodes with three loads from the same
2893 location in each node, and the vector size is 4. I.e., we have a
2894 a0b0c0a1b1c1... sequence and we need to create the following vectors:
2895 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
2896 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
2899 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
2900 The last mask is illegal since a permute operation takes only two
2901 input vectors, so the mask element values cannot exceed 2 * NUNITS - 1.
2902 Hence, the last mask must be converted into {2,5,5,5}.
2903 For the first two permutations we need the first and the second input
2904 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
2905 we need the second and the third vectors: {b1,c1,a2,b2} and
2906 {c2,a3,b3,c3}. */
2909 scalar_index = 0;
2910 index = 0;
2911 vect_stmts_counter = 0;
2912 vec_index = 0;
2913 first_vec_index = vec_index++;
2914 if (only_one_vec)
2915 second_vec_index = first_vec_index;
2916 else
2917 second_vec_index = vec_index++;
2919 for (j = 0; j < unroll_factor; j++)
2921 for (k = 0; k < group_size; k++)
2923 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
2924 first_mask_element = i + j * group_size;
2925 if (!vect_get_mask_element (stmt, first_mask_element, 0,
2926 nunits, only_one_vec, index,
2927 mask, &current_mask_element,
2928 &need_next_vector,
2929 &number_of_mask_fixes, &mask_fixed,
2930 &needs_first_vector))
2931 return false;
2932 mask[index++] = current_mask_element;
2934 if (index == nunits)
2936 index = 0;
2937 if (!can_vec_perm_p (mode, false, mask))
2939 if (dump_enabled_p ())
2941 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
2942 vect_location,
2943 "unsupported vect permute { ");
2944 for (i = 0; i < nunits; ++i)
2945 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
2946 mask[i]);
2947 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
2949 return false;
2952 if (!analyze_only)
2954 int l;
2955 tree mask_vec, *mask_elts;
2956 mask_elts = XALLOCAVEC (tree, nunits);
2957 for (l = 0; l < nunits; ++l)
2958 mask_elts[l] = build_int_cst (mask_element_type,
2959 mask[l]);
2960 mask_vec = build_vector (mask_type, mask_elts);
2962 if (need_next_vector)
2964 first_vec_index = second_vec_index;
2965 second_vec_index = vec_index;
2968 next_scalar_stmt
2969 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
2971 vect_create_mask_and_perm (stmt, next_scalar_stmt,
2972 mask_vec, first_vec_index, second_vec_index,
2973 gsi, node, vectype, dr_chain,
2974 ncopies, vect_stmts_counter++);
2981 return true;
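/* A standalone sketch (illustrative, not GCC code) of the raw mask
   computation above, before the two-vector fix-up: for unroll copy J and
   group slot K, the element index into the flat load sequence is
   PERM[K] + J * GROUP_SIZE.  For the a's node of the a0b0c0a1... example
   in the comment above (PERM == {0,0,0}, GROUP_SIZE == 3,
   UNROLL_FACTOR == 4) this yields 0,0,0, 3,3,3, 6,6,6, 9,9,9, i.e. the
   {0,0,0,3} {3,3,6,6} {6,9,9,9} masks quoted there.  raw_mask_elements
   is an invented name; OUT must hold GROUP_SIZE * UNROLL_FACTOR ints.  */
static void
raw_mask_elements (const int *perm, int group_size, int unroll_factor,
                   int *out)
{
  int n = 0;
  for (int j = 0; j < unroll_factor; j++)
    for (int k = 0; k < group_size; k++)
      out[n++] = perm[k] + j * group_size;
}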
2986 /* Vectorize SLP instance tree in postorder. */
2988 static bool
2989 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
2990 unsigned int vectorization_factor)
2992 gimple stmt;
2993 bool grouped_store, is_store;
2994 gimple_stmt_iterator si;
2995 stmt_vec_info stmt_info;
2996 unsigned int vec_stmts_size, nunits, group_size;
2997 tree vectype;
2998 int i;
2999 slp_tree child;
3001 if (!node)
3002 return false;
3004 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3005 vect_schedule_slp_instance (child, instance, vectorization_factor);
3007 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3008 stmt_info = vinfo_for_stmt (stmt);
3010 /* VECTYPE is the type of the destination. */
3011 vectype = STMT_VINFO_VECTYPE (stmt_info);
3012 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3013 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3015 /* For each SLP instance calculate the number of vector stmts to be created
3016 for the scalar stmts in each node of the SLP tree. The number of vector
3017 elements in one vector iteration is the number of scalar elements in
3018 one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
3019 size. */
3020 vec_stmts_size = (vectorization_factor * group_size) / nunits;
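/* Worked example (illustrative): with VF == 4, GROUP_SIZE == 2 and
   NUNITS == 4, one vector iteration covers 4 * 2 == 8 scalar elements,
   i.e. 8 / 4 == 2 vector stmts for this node.  */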
3022 if (!SLP_TREE_VEC_STMTS (node).exists ())
3024 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3025 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3028 if (dump_enabled_p ())
3030 dump_printf_loc (MSG_NOTE,vect_location,
3031 "------>vectorizing SLP node starting from: ");
3032 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3033 dump_printf (MSG_NOTE, "\n");
3036 /* Loads should be inserted before the first load. */
3037 if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
3038 && STMT_VINFO_GROUPED_ACCESS (stmt_info)
3039 && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
3040 && SLP_TREE_LOAD_PERMUTATION (node).exists ())
3041 si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
3042 else if (is_pattern_stmt_p (stmt_info))
3043 si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
3044 else
3045 si = gsi_for_stmt (stmt);
3047 /* Stores should be inserted just before the last store. */
3048 if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
3049 && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
3051 gimple last_store = vect_find_last_store_in_slp_instance (instance);
3052 if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
3053 last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
3054 si = gsi_for_stmt (last_store);
3057 /* Mark the first element of the reduction chain as a reduction to properly
3058 transform the node. In the analysis phase only the last element of the
3059 chain is marked as a reduction. */
3060 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3061 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3063 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3064 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3067 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3068 return is_store;
3071 /* Replace the scalar calls in SLP node NODE with assignments of zero to
3072 their lhs. For loop vectorization this is done in vectorizable_call, but
3073 for SLP it needs to be deferred until the end of vect_schedule_slp, because
3074 multiple SLP instances may refer to the same scalar stmt. */
3076 static void
3077 vect_remove_slp_scalar_calls (slp_tree node)
3079 gimple stmt, new_stmt;
3080 gimple_stmt_iterator gsi;
3081 int i;
3082 slp_tree child;
3083 tree lhs;
3084 stmt_vec_info stmt_info;
3086 if (!node)
3087 return;
3089 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3090 vect_remove_slp_scalar_calls (child);
3092 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3094 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3095 continue;
3096 stmt_info = vinfo_for_stmt (stmt);
3097 if (stmt_info == NULL
3098 || is_pattern_stmt_p (stmt_info)
3099 || !PURE_SLP_STMT (stmt_info))
3100 continue;
3101 lhs = gimple_call_lhs (stmt);
3102 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3103 set_vinfo_for_stmt (new_stmt, stmt_info);
3104 set_vinfo_for_stmt (stmt, NULL);
3105 STMT_VINFO_STMT (stmt_info) = new_stmt;
3106 gsi = gsi_for_stmt (stmt);
3107 gsi_replace (&gsi, new_stmt, false);
3108 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3112 /* Generate vector code for all SLP instances in the loop/basic block. */
3114 bool
3115 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3117 vec<slp_instance> slp_instances;
3118 slp_instance instance;
3119 unsigned int i, vf;
3120 bool is_store = false;
3122 if (loop_vinfo)
3124 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3125 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3127 else
3129 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3130 vf = 1;
3133 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3135 /* Schedule the tree of INSTANCE. */
3136 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3137 instance, vf);
3138 if (dump_enabled_p ())
3139 dump_printf_loc (MSG_NOTE, vect_location,
3140 "vectorizing stmts using SLP.\n");
3143 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3145 slp_tree root = SLP_INSTANCE_TREE (instance);
3146 gimple store;
3147 unsigned int j;
3148 gimple_stmt_iterator gsi;
3150 /* Remove scalar call stmts. Do not do this for basic-block
3151 vectorization as not all uses may be vectorized.
3152 ??? Why should this be necessary? DCE should be able to
3153 remove the stmts itself.
3154 ??? For BB vectorization we can as well remove scalar
3155 stmts starting from the SLP tree root if they have no
3156 uses. */
3157 if (loop_vinfo)
3158 vect_remove_slp_scalar_calls (root);
3160 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3161 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3163 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3164 break;
3166 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3167 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3168 /* Free the attached stmt_vec_info and remove the stmt. */
3169 gsi = gsi_for_stmt (store);
3170 unlink_stmt_vdef (store);
3171 gsi_remove (&gsi, true);
3172 release_defs (store);
3173 free_stmt_vec_info (store);
3177 return is_store;
3181 /* Vectorize the basic block. */
3183 void
3184 vect_slp_transform_bb (basic_block bb)
3186 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3187 gimple_stmt_iterator si;
3189 gcc_assert (bb_vinfo);
3191 if (dump_enabled_p ())
3192 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3194 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3196 gimple stmt = gsi_stmt (si);
3197 stmt_vec_info stmt_info;
3199 if (dump_enabled_p ())
3201 dump_printf_loc (MSG_NOTE, vect_location,
3202 "------>SLPing statement: ");
3203 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3204 dump_printf (MSG_NOTE, "\n");
3207 stmt_info = vinfo_for_stmt (stmt);
3208 gcc_assert (stmt_info);
3210 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3211 if (STMT_SLP_TYPE (stmt_info))
3213 vect_schedule_slp (NULL, bb_vinfo);
3214 break;
3218 if (dump_enabled_p ())
3219 dump_printf_loc (MSG_NOTE, vect_location,
3220 "BASIC BLOCK VECTORIZED\n");
3222 destroy_bb_vec_info (bb_vinfo);