/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
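
/* Overview: SLP (superword-level parallelism) packs groups of isomorphic
   scalar statements into vector statements.  As a small illustrative
   example, the four independent stores

       a[0] = b[0] + c[0];
       a[1] = b[1] + c[1];
       a[2] = b[2] + c[2];
       a[3] = b[3] + c[3];

   can become a single vector add and vector store, provided the group is
   gap-free and the target supports the vector operation.  The routines
   below build, verify, and cost the SLP tree for such groups.  */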
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"

/* Extract the location of the basic block in the source code.
   Return the basic block location if found, and UNKNOWN_LOCATION
   otherwise.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}

/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}


/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  free (instance);
}

/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}

/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}


/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}

/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}

/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, unsigned stmt_num,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;
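
  /* Walk the operands, comparing each def against the def recorded for the
     first stmt of the group.  On a mismatch for a commutative operation the
     loop below retries once with the two operands read in swapped order
     (the SWAPPED ? !I : I index), via the AGAIN label.  */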
  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern
              /* Allow different pattern state for the defs of the
                 first stmt in reduction chains.  */
              && (oprnd_info->first_dt != vect_reduction_def
                  || (!second && !oprnd_info->second_pattern)))
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return -1;
            }
        }

      if (second)
        oprnd_info->second_pattern = pattern;

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}

/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are not of unsupported operation types.  Return TRUE
   if so; otherwise return FALSE and indicate in *MATCHES which stmts are
   not isomorphic to the first one.  If MATCHES[0] is false then this
   indicates the comparison could not be carried out or the stmts will
   never be vectorized by SLP.  */
static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches,
                       bool *two_operators)
{
  unsigned int i;
  gimple first_stmt = stmts[0], stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* If populating the vector type requires unrolling then fail
         before adjusting *max_nunits for basic-block vectorization.  */
      if (bb_vinfo
          && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unrolling required "
                           "in basic block SLP\n");
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && alt_stmt_code == ERROR_MARK)
            alt_stmt_code = rhs_code;
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              /* Handle mismatches in plus/minus by computing both
                 and merging the results.  */
              && !((first_stmt_code == PLUS_EXPR
                    || first_stmt_code == MINUS_EXPR)
                   && (alt_stmt_code == PLUS_EXPR
                       || alt_stmt_code == MINUS_EXPR)
                   && rhs_code == alt_stmt_code)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    stmt, 0);
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "original stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    first_stmt, 0);
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ??? We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                        && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                       /* If the group is split up then GROUP_GAP
                          isn't correct here, nor is GROUP_FIRST_ELEMENT.  */
                       || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && TREE_CODE_CLASS (rhs_code) != tcc_expression
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;
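
  /* A two-operator node mixes two operation codes across the group, e.g.

         a[0] = b[0] + c[0];
         a[1] = b[1] - c[1];

     Both operations are applied to whole vectors and the lanes are then
     blended with a permutation: SEL[I] == I selects lane I from the first
     result vector, SEL[I] == I + NUNITS selects it from the second, and
     the target must support that permutation.  */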
  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
        = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
        {
          sel[i] = i;
          if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
            sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
        }
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          for (i = 0; i < group_size; ++i)
            if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
              {
                matches[i] = false;
                if (dump_enabled_p ())
                  {
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different operation "
                                     "in stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      stmts[i], 0);
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "original stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      first_stmt, 0);
                  }
              }
          return false;
        }
      *two_operators = true;
    }

  return true;
}

/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation, or are of unsupported types of operation.  Otherwise
   return TRUE.  */
static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches,
                              &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                           stmt, i, &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ??? This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size...  */
      if (bb_vinfo
          && !matches[0]
          /* ??? Rejecting patterns this way doesn't work.  We'd have to
             do extra work to cancel the pattern so the uses see the
             scalar version.  */
          && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          dump_printf_loc (MSG_NOTE, vect_location,
                           "Building vector operands from scalars\n");
          oprnd_info->def_stmts = vNULL;
          vect_free_slp_tree (child);
          SLP_TREE_CHILDREN (*node).quick_push (NULL);
          continue;
        }
      /* If the SLP build for operand zero failed and operand zero
         and one can be commuted try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ??? For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          && !SLP_TREE_TWO_OPERATORS (*node)
          /* Do so only if the number of not successful permutes was
             not more than a cut-off as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again with scratch 'matches' ...  */
          bool *tem = XALLOCAVEC (bool, group_size);
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   tem, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              /* ... so if successful we can apply the operand swapping
                 to the GIMPLE IL.  This is necessary because for example
                 vect_get_slp_defs uses operand indexes and thus expects
                 canonical operand order.  */
              for (j = 0; j < group_size; ++j)
                if (!matches[j])
                  {
                    gimple stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                    swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                       gimple_assign_rhs2_ptr (stmt));
                  }
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}

/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}

/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}

/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);
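
  /* PERMUTATION[I] gives the position stmt I is moved to; e.g. the
     permutation {2, 0, 1} places stmt 0 in slot 2.  */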
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}

/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ??? Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
         no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
            continue;
          bool subchain_p = true;
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                {
                  subchain_p = false;
                  break;
                }
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
          if (subchain_p)
            SLP_TREE_LOAD_PERMUTATION (node).release ();
          else
            {
              /* Verify the permutation can be generated.  */
              vec<tree> tem;
              if (!vect_transform_slp_perm_load (node, tem, NULL,
                                                 1, slp_instn, true))
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                   vect_location,
                                   "unsupported load permutation\n");
                  return false;
                }
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ??? This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
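  /* For example, with GROUP_SIZE 2 a pair of load nodes with the
     permutations {0, 0} and {1, 1} (each node repeating one element, as
     happens with some complex-number accesses) matches this form.  */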
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;
  return true;
}

/* Find the last scalar stmt in the SLP tree NODE; for an instance's root
   node this is the last store.  */

static gimple
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple last = NULL, stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
        last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
        last = get_later_stmt (stmt, last);
    }

  return last;
}

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         stmt_vector_for_cost *body_cost_vec,
                         unsigned ncopies_for_cost)
{
  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (child)
      vect_analyze_slp_cost_1 (instance, child, prologue_cost_vec,
                               body_cost_vec, ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ??? Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    {
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                        stmt_info, 0, vect_body);
      if (SLP_TREE_TWO_OPERATORS (node))
        {
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                            stmt_info, 0, vect_body);
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                            stmt_info, 0, vect_body);
        }
    }

  /* Scan operands and account for prologue cost of constants/externals.
     ??? This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, STMT_VINFO_LOOP_VINFO (stmt_info),
                              STMT_VINFO_BB_VINFO (stmt_info),
                              &def_stmt, &def, &dt))
        {
          /* Without looking at the actual initializer a vector of
             constants can be implemented as load from the constant pool.
             ??? We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          if (dt == vect_constant_def)
            record_stmt_cost (prologue_cost_vec, 1, vector_load,
                              stmt_info, 0, vect_prologue);
          else if (dt == vect_external_def)
            record_stmt_cost (prologue_cost_vec, 1, vec_construct,
                              stmt_info, 0, vect_prologue);
        }
    }
}

/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (slp_instance instance, void *data)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  slp_tree node = SLP_INSTANCE_TREE (instance);
  stmt_vec_info stmt_info = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
  /* Adjust the group_size by the vectorization factor which is always one
     for basic-block vectorization.  */
  if (STMT_VINFO_LOOP_VINFO (stmt_info))
    group_size *= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  /* For reductions look at a reduction operand in case the reduction
     operation is widening like DOT_PROD or SAD.  */
  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      switch (gimple_assign_rhs_code (stmt))
        {
        case DOT_PROD_EXPR:
        case SAD_EXPR:
          nunits = TYPE_VECTOR_SUBPARTS (get_vectype_for_scalar_type
                     (TREE_TYPE (gimple_assign_rhs1 (stmt))));
          break;
        default:;
        }
    }
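
  /* E.g. a group of 8 stmts vectorized with V4SI (NUNITS == 4) needs
     lcm (4, 8) / 4 == 2 vector stmts per copy of the group.  */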
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  vect_analyze_slp_cost_1 (instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, &body_cost_vec,
                           ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  */
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  /* Record the instance's instructions in the target cost model.  */
  FOR_EACH_VEC_ELT (body_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_body);
    }

  prologue_cost_vec.release ();
  body_cost_vec.release ();
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
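  /* E.g. NUNITS 4 with a group of 6 stmts gives lcm (4, 6) / 6 == 2:
     two copies of the group are needed to fill three vectors.  */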
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
      /* Mark the first element of the reduction chain as reduction to properly
         transform the node.  In the reduction analysis phase only the last
         element of the chain is marked as reduction.  */
      if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, matches, &npermutes, NULL,
                           max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }
        }

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}

/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                  unsigned max_tree_size)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                   max_tree_size))
      ok = true;

  if (reduc_chains.length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                       max_tree_size))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (reductions.length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
                                    max_tree_size))
    ok = true;

  return true;
}

/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}
2010 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
2011 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
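/* For example (illustrative): if _1 = b[i] + c[i] belongs to an SLP
   instance but _1 is also used by a stmt that is not part of any SLP
   instance, the def stmt of _1 is marked HYBRID so that loop-based
   vectorization handles it as well.  */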
2013 static void
2014 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
2016 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
2017 imm_use_iterator imm_iter;
2018 gimple use_stmt;
2019 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
2020 slp_tree child;
2021 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2022 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2023 int j;
2025 /* Propagate hybrid down the SLP tree. */
2026 if (stype == hybrid)
2028 else if (HYBRID_SLP_STMT (stmt_vinfo))
2029 stype = hybrid;
2030 else
2032 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
2033 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
2034 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2035 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
2036 if (gimple_bb (use_stmt)
2037 && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2038 && (use_vinfo = vinfo_for_stmt (use_stmt))
2039 && !STMT_SLP_TYPE (use_vinfo)
2040 && (STMT_VINFO_RELEVANT (use_vinfo)
2041 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
2042 || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2043 && STMT_VINFO_RELATED_STMT (use_vinfo)
2044 && !STMT_SLP_TYPE (vinfo_for_stmt
2045 (STMT_VINFO_RELATED_STMT (use_vinfo)))))
2046 && !(gimple_code (use_stmt) == GIMPLE_PHI
2047 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2048 stype = hybrid;
2051 if (stype == hybrid)
2053 if (dump_enabled_p ())
2055 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2056 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2058 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2061 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2062 if (child)
2063 vect_detect_hybrid_slp_stmts (child, i, stype);
2066 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2068 static tree
2069 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2071 walk_stmt_info *wi = (walk_stmt_info *)data;
2072 struct loop *loopp = (struct loop *)wi->info;
2074 if (wi->is_lhs)
2075 return NULL_TREE;
2077 if (TREE_CODE (*tp) == SSA_NAME
2078 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2080 gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
2081 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2082 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2084 if (dump_enabled_p ())
2086 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2087 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2089 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2093 return NULL_TREE;
2096 static tree
2097 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2098 walk_stmt_info *)
2100 /* If the stmt is in a SLP instance then this isn't a reason
2101 to mark use definitions in other SLP instances as hybrid. */
2102 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2103 *handled = true;
2104 return NULL_TREE;
2107 /* Find stmts that must be both vectorized and SLPed. */
2109 void
2110 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2112 unsigned int i;
2113 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2114 slp_instance instance;
2116 if (dump_enabled_p ())
2117 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2118 "\n");
2120 /* First walk all pattern stmts in the loop and mark the defs of their
2121 uses as hybrid, because immediate uses inside pattern stmts are not recorded. */
2122 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2124 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2125 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2126 gsi_next (&gsi))
2128 gimple stmt = gsi_stmt (gsi);
2129 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2130 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2132 walk_stmt_info wi;
2133 memset (&wi, 0, sizeof (wi));
2134 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2135 gimple_stmt_iterator gsi2
2136 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2137 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2138 vect_detect_hybrid_slp_1, &wi);
2139 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2140 vect_detect_hybrid_slp_2,
2141 vect_detect_hybrid_slp_1, &wi);
2146 /* Then walk the SLP instance trees marking stmts with uses in
2147 non-SLP stmts as hybrid, also propagating hybrid down the
2148 SLP tree, collecting the above info on-the-fly. */
2149 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2151 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2152 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2153 i, pure_slp);
2158 /* Create and initialize a new bb_vec_info struct for BB, as well as
2159 stmt_vec_info structs for all the stmts in it. */
2161 static bb_vec_info
2162 new_bb_vec_info (basic_block bb)
2164 bb_vec_info res = NULL;
2165 gimple_stmt_iterator gsi;
2167 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2168 BB_VINFO_BB (res) = bb;
2170 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2172 gimple stmt = gsi_stmt (gsi);
2173 gimple_set_uid (stmt, 0);
2174 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
2177 BB_VINFO_GROUPED_STORES (res).create (10);
2178 BB_VINFO_SLP_INSTANCES (res).create (2);
2179 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2181 bb->aux = res;
2182 return res;
2186 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2187 stmts in the basic block. */
2189 static void
2190 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2192 vec<slp_instance> slp_instances;
2193 slp_instance instance;
2194 basic_block bb;
2195 gimple_stmt_iterator si;
2196 unsigned i;
2198 if (!bb_vinfo)
2199 return;
2201 bb = BB_VINFO_BB (bb_vinfo);
2203 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2205 gimple stmt = gsi_stmt (si);
2206 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2208 if (stmt_info)
2209 /* Free stmt_vec_info. */
2210 free_stmt_vec_info (stmt);
2213 vect_destroy_datarefs (NULL, bb_vinfo);
2214 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2215 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2216 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2217 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2218 vect_free_slp_instance (instance);
2219 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2220 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2221 free (bb_vinfo);
2222 bb->aux = NULL;
2226 /* Analyze statements contained in SLP tree node after recursively analyzing
2227 the subtree. Return TRUE if the operations are supported. */
2229 static bool
2230 vect_slp_analyze_node_operations (slp_tree node)
2232 bool dummy;
2233 int i;
2234 gimple stmt;
2235 slp_tree child;
2237 if (!node)
2238 return true;
2240 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2241 if (!vect_slp_analyze_node_operations (child))
2242 return false;
2244 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2246 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2247 gcc_assert (stmt_info);
2248 gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
2250 if (!vect_analyze_stmt (stmt, &dummy, node))
2251 return false;
2254 return true;
2258 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2259 operations are supported. */
2261 bool
2262 vect_slp_analyze_operations (vec<slp_instance> slp_instances, void *data)
2264 slp_instance instance;
2265 int i;
2267 if (dump_enabled_p ())
2268 dump_printf_loc (MSG_NOTE, vect_location,
2269 "=== vect_slp_analyze_operations ===\n");
2271 for (i = 0; slp_instances.iterate (i, &instance); )
2273 if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
2275 dump_printf_loc (MSG_NOTE, vect_location,
2276 "removing SLP instance operations starting from: ");
2277 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2278 SLP_TREE_SCALAR_STMTS
2279 (SLP_INSTANCE_TREE (instance))[0], 0);
2280 vect_free_slp_instance (instance);
2281 slp_instances.ordered_remove (i);
2283 else
2285 /* Compute the costs of the SLP instance. */
2286 vect_analyze_slp_cost (instance, data);
2287 i++;
2291 if (!slp_instances.length ())
2292 return false;
2294 return true;
2298 /* Compute the scalar cost of the SLP node NODE and its children
2299 and return it. Do not account defs that are marked in LIFE and
2300 update LIFE according to uses of NODE. */
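/* E.g. (illustrative): a def that is also used in a PHI or outside the
   basic block must be kept live in the scalar code anyway, so its stmt
   does not count towards the scalar cost that vectorization has to
   beat.  */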
2302 static unsigned
2303 vect_bb_slp_scalar_cost (basic_block bb,
2304 slp_tree node, vec<bool, va_heap> *life)
2306 unsigned scalar_cost = 0;
2307 unsigned i;
2308 gimple stmt;
2309 slp_tree child;
2311 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2313 unsigned stmt_cost;
2314 ssa_op_iter op_iter;
2315 def_operand_p def_p;
2316 stmt_vec_info stmt_info;
2318 if ((*life)[i])
2319 continue;
2321 /* If there is a non-vectorized use of the defs then the scalar
2322 stmt is kept live, in which case we do not account for it or any
2323 required defs in the SLP children in the scalar cost. This
2324 way we make the vectorization more costly when compared to
2325 the scalar cost. */
2326 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2328 imm_use_iterator use_iter;
2329 gimple use_stmt;
2330 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2331 if (!is_gimple_debug (use_stmt)
2332 && (gimple_code (use_stmt) == GIMPLE_PHI
2333 || gimple_bb (use_stmt) != bb
2334 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2336 (*life)[i] = true;
2337 BREAK_FROM_IMM_USE_STMT (use_iter);
2340 if ((*life)[i])
2341 continue;
2343 stmt_info = vinfo_for_stmt (stmt);
2344 if (STMT_VINFO_DATA_REF (stmt_info))
2346 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2347 stmt_cost = vect_get_stmt_cost (scalar_load);
2348 else
2349 stmt_cost = vect_get_stmt_cost (scalar_store);
2351 else
2352 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2354 scalar_cost += stmt_cost;
2357 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2358 if (child)
2359 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2361 return scalar_cost;
2364 /* Check if vectorization of the basic block is profitable. */
2366 static bool
2367 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2369 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2370 slp_instance instance;
2371 int i;
2372 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2373 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2375 /* Calculate scalar cost. */
2376 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2378 auto_vec<bool, 20> life;
2379 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2380 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2381 SLP_INSTANCE_TREE (instance),
2382 &life);
2385 /* Complete the target-specific cost calculation. */
2386 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2387 &vec_inside_cost, &vec_epilogue_cost);
2389 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2391 if (dump_enabled_p ())
2393 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2394 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2395 vec_inside_cost);
2396 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2397 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2398 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2401 /* Vectorization is profitable if its cost is less than the cost of the
2402 scalar version. */
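/* E.g. (illustrative): an inside cost of 6 plus prologue and epilogue
   costs of 1 each gives a vector cost of 8; a scalar cost of 8 then
   rejects vectorization, as a strictly smaller vector cost is
   required.  */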
2403 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2404 return false;
2406 return true;
2409 /* Check if the basic block can be vectorized. */
2411 static bb_vec_info
2412 vect_slp_analyze_bb_1 (basic_block bb)
2414 bb_vec_info bb_vinfo;
2415 vec<slp_instance> slp_instances;
2416 slp_instance instance;
2417 int i;
2418 int min_vf = 2;
2419 unsigned n_stmts = 0;
2421 bb_vinfo = new_bb_vec_info (bb);
2422 if (!bb_vinfo)
2423 return NULL;
2425 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2427 if (dump_enabled_p ())
2428 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2429 "not vectorized: unhandled data-ref in basic "
2430 "block.\n");
2432 destroy_bb_vec_info (bb_vinfo);
2433 return NULL;
2436 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2438 if (dump_enabled_p ())
2439 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2440 "not vectorized: not enough data-refs in "
2441 "basic block.\n");
2443 destroy_bb_vec_info (bb_vinfo);
2444 return NULL;
2447 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2449 if (dump_enabled_p ())
2450 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2451 "not vectorized: unhandled data access in "
2452 "basic block.\n");
2454 destroy_bb_vec_info (bb_vinfo);
2455 return NULL;
2458 vect_pattern_recog (NULL, bb_vinfo);
2460 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2462 if (dump_enabled_p ())
2463 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2464 "not vectorized: bad data alignment in basic "
2465 "block.\n");
2467 destroy_bb_vec_info (bb_vinfo);
2468 return NULL;
2471 /* Check the SLP opportunities in the basic block, analyze and build SLP
2472 trees. */
2473 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2475 if (dump_enabled_p ())
2477 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2478 "Failed to SLP the basic block.\n");
2479 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2480 "not vectorized: failed to find SLP opportunities "
2481 "in basic block.\n");
2484 destroy_bb_vec_info (bb_vinfo);
2485 return NULL;
2488 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2490 /* Mark all the statements that we want to vectorize as pure SLP and
2491 relevant. */
2492 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2494 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2495 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2498 /* Mark all the statements that we do not want to vectorize. */
2499 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2500 !gsi_end_p (gsi); gsi_next (&gsi))
2502 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2503 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2504 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2507 /* Analyze dependences. At this point all stmts not participating in
2508 vectorization have to be marked. Dependence analysis assumes
2509 that we either vectorize all SLP instances or none at all. */
2510 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2512 if (dump_enabled_p ())
2513 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2514 "not vectorized: unhandled data dependence "
2515 "in basic block.\n");
2517 destroy_bb_vec_info (bb_vinfo);
2518 return NULL;
2521 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2523 if (dump_enabled_p ())
2524 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2525 "not vectorized: unsupported alignment in basic "
2526 "block.\n");
2527 destroy_bb_vec_info (bb_vinfo);
2528 return NULL;
2531 if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo),
2532 BB_VINFO_TARGET_COST_DATA (bb_vinfo)))
2534 if (dump_enabled_p ())
2535 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2536 "not vectorized: bad operation in basic block.\n");
2538 destroy_bb_vec_info (bb_vinfo);
2539 return NULL;
2542 /* Cost model: check if the vectorization is worthwhile. */
2543 if (!unlimited_cost_model (NULL)
2544 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2546 if (dump_enabled_p ())
2547 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2548 "not vectorized: vectorization is not "
2549 "profitable.\n");
2551 destroy_bb_vec_info (bb_vinfo);
2552 return NULL;
2555 if (dump_enabled_p ())
2556 dump_printf_loc (MSG_NOTE, vect_location,
2557 "Basic block will be vectorized using SLP\n");
2559 return bb_vinfo;
2563 bb_vec_info
2564 vect_slp_analyze_bb (basic_block bb)
2566 bb_vec_info bb_vinfo;
2567 int insns = 0;
2568 gimple_stmt_iterator gsi;
2569 unsigned int vector_sizes;
2571 if (dump_enabled_p ())
2572 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2574 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2576 gimple stmt = gsi_stmt (gsi);
2577 if (!is_gimple_debug (stmt)
2578 && !gimple_nop_p (stmt)
2579 && gimple_code (stmt) != GIMPLE_LABEL)
2580 insns++;
2583 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2585 if (dump_enabled_p ())
2586 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2587 "not vectorized: too many instructions in "
2588 "basic block.\n");
2590 return NULL;
2593 /* Autodetect the first vector size we try. */
2594 current_vector_size = 0;
2595 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2597 while (1)
2599 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2600 if (bb_vinfo)
2601 return bb_vinfo;
2603 destroy_bb_vec_info (bb_vinfo);
2605 vector_sizes &= ~current_vector_size;
2606 if (vector_sizes == 0
2607 || current_vector_size == 0)
2608 return NULL;
2610 /* Try the next biggest vector size. */
2611 current_vector_size = 1 << floor_log2 (vector_sizes);
2612 if (dump_enabled_p ())
2613 dump_printf_loc (MSG_NOTE, vect_location,
2614 "***** Re-trying analysis with "
2615 "vector size %d\n", current_vector_size);
2620 /* For constant and loop invariant defs of SLP_NODE this function returns
2621 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2622 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2623 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2624 REDUC_INDEX is the index of the reduction operand in the statements, unless
2625 it is -1. */
2627 static void
2628 vect_get_constant_vectors (tree op, slp_tree slp_node,
2629 vec<tree> *vec_oprnds,
2630 unsigned int op_num, unsigned int number_of_vectors,
2631 int reduc_index)
2633 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2634 gimple stmt = stmts[0];
2635 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2636 unsigned nunits;
2637 tree vec_cst;
2638 tree *elts;
2639 unsigned j, number_of_places_left_in_vector;
2640 tree vector_type;
2641 tree vop;
2642 int group_size = stmts.length ();
2643 unsigned int vec_num, i;
2644 unsigned number_of_copies = 1;
2645 vec<tree> voprnds;
2646 voprnds.create (number_of_vectors);
2647 bool constant_p, is_store;
2648 tree neutral_op = NULL;
2649 enum tree_code code = gimple_expr_code (stmt);
2650 gimple def_stmt;
2651 struct loop *loop;
2652 gimple_seq ctor_seq = NULL;
2654 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2655 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2657 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2658 && reduc_index != -1)
2660 op_num = reduc_index;
2661 op = gimple_op (stmt, op_num + 1);
2662 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2663 we need either neutral operands or the original operands. See
2664 get_initial_def_for_reduction() for details. */
2665 switch (code)
2667 case WIDEN_SUM_EXPR:
2668 case DOT_PROD_EXPR:
2669 case SAD_EXPR:
2670 case PLUS_EXPR:
2671 case MINUS_EXPR:
2672 case BIT_IOR_EXPR:
2673 case BIT_XOR_EXPR:
2674 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2675 neutral_op = build_real (TREE_TYPE (op), dconst0);
2676 else
2677 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2679 break;
2681 case MULT_EXPR:
2682 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2683 neutral_op = build_real (TREE_TYPE (op), dconst1);
2684 else
2685 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2687 break;
2689 case BIT_AND_EXPR:
2690 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2691 break;
2693 /* For MIN/MAX we don't have an easy neutral operand but
2694 the initial values can be used fine here. Only for
2695 a reduction chain we have to force a neutral element. */
2696 case MAX_EXPR:
2697 case MIN_EXPR:
2698 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2699 neutral_op = NULL;
2700 else
2702 def_stmt = SSA_NAME_DEF_STMT (op);
2703 loop = (gimple_bb (stmt))->loop_father;
2704 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2705 loop_preheader_edge (loop));
2707 break;
2709 default:
2710 gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
2711 neutral_op = NULL;
2715 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2717 is_store = true;
2718 op = gimple_assign_rhs1 (stmt);
2720 else
2721 is_store = false;
2723 gcc_assert (op);
2725 if (CONSTANT_CLASS_P (op))
2726 constant_p = true;
2727 else
2728 constant_p = false;
2730 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2731 created vectors. It is greater than 1 if unrolling is performed.
2733 For example, we have two scalar operands, s1 and s2 (e.g., group of
2734 strided accesses of size two), while NUNITS is four (i.e., four scalars
2735 of this type can be packed in a vector). The output vector will contain
2736 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2737 will be 2).
2739 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2740 containing the operands.
2742 For example, NUNITS is four as before, and the group size is 8
2743 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2744 {s5, s6, s7, s8}. */
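/* Illustrative arithmetic for the computation below: in the first
   example above NUMBER_OF_COPIES = 4 * 1 / 2 == 2, in the second
   4 * 2 / 8 == 1.  */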
2746 number_of_copies = nunits * number_of_vectors / group_size;
2748 number_of_places_left_in_vector = nunits;
2749 elts = XALLOCAVEC (tree, nunits);
2750 bool place_after_defs = false;
2751 for (j = 0; j < number_of_copies; j++)
2753 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2755 if (is_store)
2756 op = gimple_assign_rhs1 (stmt);
2757 else
2759 switch (code)
2761 case COND_EXPR:
2762 if (op_num == 0 || op_num == 1)
2764 tree cond = gimple_assign_rhs1 (stmt);
2765 op = TREE_OPERAND (cond, op_num);
2767 else
2769 if (op_num == 2)
2770 op = gimple_assign_rhs2 (stmt);
2771 else
2772 op = gimple_assign_rhs3 (stmt);
2774 break;
2776 case CALL_EXPR:
2777 op = gimple_call_arg (stmt, op_num);
2778 break;
2780 case LSHIFT_EXPR:
2781 case RSHIFT_EXPR:
2782 case LROTATE_EXPR:
2783 case RROTATE_EXPR:
2784 op = gimple_op (stmt, op_num + 1);
2785 /* Unlike the other binary operators, shifts/rotates take an
2786 int shift count instead of a count of the same type as
2787 the lhs, so make sure the scalar is the right type if
2788 we are dealing with vectors of
2789 long long/long/short/char. */
2790 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2791 op = fold_convert (TREE_TYPE (vector_type), op);
2792 break;
2794 default:
2795 op = gimple_op (stmt, op_num + 1);
2796 break;
2800 if (reduc_index != -1)
2802 loop = (gimple_bb (stmt))->loop_father;
2803 def_stmt = SSA_NAME_DEF_STMT (op);
2805 gcc_assert (loop);
2807 /* Get the def before the loop. In reduction chain we have only
2808 one initial value. */
2809 if ((j != (number_of_copies - 1)
2810 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2811 && i != 0))
2812 && neutral_op)
2813 op = neutral_op;
2814 else
2815 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2816 loop_preheader_edge (loop));
2819 /* Create 'vect_ = {op0,op1,...,opn}'. */
2820 number_of_places_left_in_vector--;
2821 tree orig_op = op;
2822 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2824 if (CONSTANT_CLASS_P (op))
2826 op = fold_unary (VIEW_CONVERT_EXPR,
2827 TREE_TYPE (vector_type), op);
2828 gcc_assert (op && CONSTANT_CLASS_P (op));
2830 else
2832 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2833 gimple init_stmt;
2834 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2835 init_stmt
2836 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2837 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2838 op = new_temp;
2841 elts[number_of_places_left_in_vector] = op;
2842 if (!CONSTANT_CLASS_P (op))
2843 constant_p = false;
2844 if (TREE_CODE (orig_op) == SSA_NAME
2845 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2846 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2847 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2848 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2849 place_after_defs = true;
2851 if (number_of_places_left_in_vector == 0)
2853 number_of_places_left_in_vector = nunits;
2855 if (constant_p)
2856 vec_cst = build_vector (vector_type, elts);
2857 else
2859 vec<constructor_elt, va_gc> *v;
2860 unsigned k;
2861 vec_alloc (v, nunits);
2862 for (k = 0; k < nunits; ++k)
2863 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2864 vec_cst = build_constructor (vector_type, v);
2866 tree init;
2867 gimple_stmt_iterator gsi;
2868 if (place_after_defs)
2870 gsi = gsi_for_stmt
2871 (vect_find_last_scalar_stmt_in_slp (slp_node));
2872 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
2874 else
2875 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
2876 if (ctor_seq != NULL)
2878 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
2879 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2880 GSI_SAME_STMT);
2881 ctor_seq = NULL;
2883 voprnds.quick_push (init);
2884 place_after_defs = false;
2889 /* Since the vectors are created in the reverse order, we should reverse
2890 them. */
2891 vec_num = voprnds.length ();
2892 for (j = vec_num; j != 0; j--)
2894 vop = voprnds[j - 1];
2895 vec_oprnds->quick_push (vop);
2898 voprnds.release ();
2900 /* If VF is greater than the unrolling factor needed for the SLP
2901 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
2902 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2903 to replicate the vectors. */
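/* For instance (illustrative): for a PLUS reduction with NUNITS == 4,
   if one vector exists but NUMBER_OF_VECTORS is 2, one extra neutral
   vector {0, 0, 0, 0} is pushed below.  */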
2904 while (number_of_vectors > vec_oprnds->length ())
2906 tree neutral_vec = NULL;
2908 if (neutral_op)
2910 if (!neutral_vec)
2911 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2913 vec_oprnds->quick_push (neutral_vec);
2915 else
2917 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2918 vec_oprnds->quick_push (vop);
2924 /* Get vectorized definitions from SLP_NODE that contains corresponding
2925 vectorized def-stmts. */
2927 static void
2928 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2930 tree vec_oprnd;
2931 gimple vec_def_stmt;
2932 unsigned int i;
2934 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2936 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2938 gcc_assert (vec_def_stmt);
2939 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2940 vec_oprnds->quick_push (vec_oprnd);
2945 /* Get vectorized definitions for SLP_NODE.
2946 If the scalar definitions are loop invariants or constants, collect them and
2947 call vect_get_constant_vectors() to create vector stmts.
2948 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
2949 must be stored in the corresponding child of SLP_NODE, and we call
2950 vect_get_slp_vect_defs () to retrieve them. */
2952 void
2953 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2954 vec<vec<tree> > *vec_oprnds, int reduc_index)
2956 gimple first_stmt;
2957 int number_of_vects = 0, i;
2958 unsigned int child_index = 0;
2959 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2960 slp_tree child = NULL;
2961 vec<tree> vec_defs;
2962 tree oprnd;
2963 bool vectorized_defs;
2965 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2966 FOR_EACH_VEC_ELT (ops, i, oprnd)
2968 /* For each operand we check if it has vectorized definitions in a child
2969 node or we need to create them (for invariants and constants). We
2970 check if the LHS of the first stmt of the next child matches OPRND.
2971 If it does, we found the correct child. Otherwise, we call
2972 vect_get_constant_vectors (), and do not advance CHILD_INDEX, in order
2973 to check this child node against the next operand. */
2974 vectorized_defs = false;
2975 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2977 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2979 /* We have to check both pattern and original def, if available. */
2980 if (child)
2982 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2983 gimple related
2984 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2986 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2987 || (related
2988 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
2990 /* The number of vector defs is determined by the number of
2991 vector statements in the node from which we get those
2992 statements. */
2993 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
2994 vectorized_defs = true;
2995 child_index++;
2998 else
2999 child_index++;
3002 if (!vectorized_defs)
3004 if (i == 0)
3006 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3007 /* Number of vector stmts was calculated according to LHS in
3008 vect_schedule_slp_instance (), fix it by replacing LHS with
3009 RHS, if necessary. See vect_get_smallest_scalar_type () for
3010 details. */
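/* E.g. (illustrative): storing ints computed from chars gives
   LHS_SIZE_UNIT == 4 and RHS_SIZE_UNIT == 1, so four vector stmts
   counted via the LHS become 4 * 1 / 4 == 1 vector def for this
   operand.  */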
3011 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3012 &rhs_size_unit);
3013 if (rhs_size_unit != lhs_size_unit)
3015 number_of_vects *= rhs_size_unit;
3016 number_of_vects /= lhs_size_unit;
3021 /* Allocate memory for vectorized defs. */
3022 vec_defs = vNULL;
3023 vec_defs.create (number_of_vects);
3025 /* For reduction defs we call vect_get_constant_vectors (), since we are
3026 looking for initial loop invariant values. */
3027 if (vectorized_defs && reduc_index == -1)
3028 /* The defs are already vectorized. */
3029 vect_get_slp_vect_defs (child, &vec_defs);
3030 else
3031 /* Build vectors from scalar defs. */
3032 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3033 number_of_vects, reduc_index);
3035 vec_oprnds->quick_push (vec_defs);
3037 /* For reductions, we only need initial values. */
3038 if (reduc_index != -1)
3039 return;
3044 /* Create NCOPIES permutation statements using the mask MASK_BYTES (by
3045 building a vector of type MASK_TYPE from it) and two input vectors placed in
3046 DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
3047 shifting by STRIDE elements of DR_CHAIN for every copy.
3048 (STRIDE is the number of vectorized stmts for NODE divided by the number of
3049 copies).
3050 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
3051 the created stmts must be inserted. */
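/* E.g. (illustrative): with four vector stmts for NODE and NCOPIES == 2,
   STRIDE is 2; the first copy permutes DR_CHAIN[FIRST_VEC_INDX] and
   DR_CHAIN[SECOND_VEC_INDX], the second copy the vectors STRIDE
   entries further on.  */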
3053 static inline void
3054 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
3055 tree mask, int first_vec_indx, int second_vec_indx,
3056 gimple_stmt_iterator *gsi, slp_tree node,
3057 tree vectype, vec<tree> dr_chain,
3058 int ncopies, int vect_stmts_counter)
3060 tree perm_dest;
3061 gimple perm_stmt = NULL;
3062 stmt_vec_info next_stmt_info;
3063 int i, stride;
3064 tree first_vec, second_vec, data_ref;
3066 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
3068 /* Initialize the vect stmts of NODE to properly insert the generated
3069 stmts later. */
3070 for (i = SLP_TREE_VEC_STMTS (node).length ();
3071 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3072 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3074 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3075 for (i = 0; i < ncopies; i++)
3077 first_vec = dr_chain[first_vec_indx];
3078 second_vec = dr_chain[second_vec_indx];
3080 /* Generate the permute statement. */
3081 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3082 first_vec, second_vec, mask);
3083 data_ref = make_ssa_name (perm_dest, perm_stmt);
3084 gimple_set_lhs (perm_stmt, data_ref);
3085 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3087 /* Store the vector statement in NODE. */
3088 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
3090 first_vec_indx += stride;
3091 second_vec_indx += stride;
3094 /* Mark the scalar stmt as vectorized. */
3095 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
3096 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
3100 /* Given FIRST_MASK_ELEMENT - the mask element in element representation,
3101 return in CURRENT_MASK_ELEMENT its equivalent in target specific
3102 representation. Check that the mask is valid and return FALSE if not.
3103 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving on to
3104 the next vector, i.e., the current first vector is not needed. */
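/* E.g. (illustrative): with MASK_NUNITS == 4, mask element 6 selects
   element 2 of the second input vector; an element of 8 or more would
   refer to a third vector, in which case the first vector is dropped
   and the mask values are rebased accordingly.  */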
3106 static bool
3107 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
3108 int mask_nunits, bool only_one_vec, int index,
3109 unsigned char *mask, int *current_mask_element,
3110 bool *need_next_vector, int *number_of_mask_fixes,
3111 bool *mask_fixed, bool *needs_first_vector)
3113 int i;
3115 /* Convert to target specific representation. */
3116 *current_mask_element = first_mask_element + m;
3117 /* Adjust the value in case it's a mask for second and third vectors. */
3118 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
3120 if (*current_mask_element < 0)
3122 if (dump_enabled_p ())
3124 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3125 "permutation requires past vector ");
3126 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3127 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3129 return false;
3132 if (*current_mask_element < mask_nunits)
3133 *needs_first_vector = true;
3135 /* We have only one input vector to permute but the mask accesses values in
3136 the next vector as well. */
3137 if (only_one_vec && *current_mask_element >= mask_nunits)
3139 if (dump_enabled_p ())
3141 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3142 "permutation requires at least two vectors ");
3143 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3144 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3147 return false;
3150 /* The mask requires the next vector. */
3151 while (*current_mask_element >= mask_nunits * 2)
3153 if (*needs_first_vector || *mask_fixed)
3155 /* We either need the first vector too or have already moved to the
3156 next vector. In both cases, this permutation needs three
3157 vectors. */
3158 if (dump_enabled_p ())
3160 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3161 "permutation requires at "
3162 "least three vectors ");
3163 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3164 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3167 return false;
3170 /* We move to the next vector, dropping the first one and working with
3171 the second and the third - we need to adjust the values of the mask
3172 accordingly. */
3173 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
3175 for (i = 0; i < index; i++)
3176 mask[i] -= mask_nunits * *number_of_mask_fixes;
3178 (*number_of_mask_fixes)++;
3179 *mask_fixed = true;
3182 *need_next_vector = *mask_fixed;
3184 /* This was the last element of this mask. Start a new one. */
3185 if (index == mask_nunits - 1)
3187 *number_of_mask_fixes = 1;
3188 *mask_fixed = false;
3189 *needs_first_vector = false;
3192 return true;
3196 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3197 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3198 permute statements for the SLP node NODE of the SLP instance
3199 SLP_NODE_INSTANCE. */
3201 bool
3202 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3203 gimple_stmt_iterator *gsi, int vf,
3204 slp_instance slp_node_instance, bool analyze_only)
3206 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3207 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3208 tree mask_element_type = NULL_TREE, mask_type;
3209 int i, j, k, nunits, vec_index = 0, scalar_index;
3210 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3211 gimple next_scalar_stmt;
3212 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3213 int first_mask_element;
3214 int index, unroll_factor, current_mask_element, ncopies;
3215 unsigned char *mask;
3216 bool only_one_vec = false, need_next_vector = false;
3217 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3218 int number_of_mask_fixes = 1;
3219 bool mask_fixed = false;
3220 bool needs_first_vector = false;
3221 machine_mode mode;
3223 mode = TYPE_MODE (vectype);
3225 if (!can_vec_perm_p (mode, false, NULL))
3227 if (dump_enabled_p ())
3229 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3230 "no vect permute for ");
3231 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3232 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3234 return false;
3237 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3238 same size as the vector element being permuted. */
3239 mask_element_type = lang_hooks.types.type_for_mode
3240 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3241 mask_type = get_vectype_for_scalar_type (mask_element_type);
3242 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3243 mask = XALLOCAVEC (unsigned char, nunits);
3244 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3246 /* The number of vector stmts to generate based only on the SLP_NODE_INSTANCE
3247 unrolling factor. */
3248 orig_vec_stmts_num = group_size *
3249 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3250 if (orig_vec_stmts_num == 1)
3251 only_one_vec = true;
3253 /* Number of copies is determined by the final vectorization factor
3254 relative to the SLP_NODE_INSTANCE unrolling factor. */
3255 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3257 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3258 return false;
3260 /* Generate permutation masks for every NODE. Number of masks for each NODE
3261 is equal to GROUP_SIZE.
3262 E.g., we have a group of three nodes with three loads from the same
3263 location in each node, and the vector size is 4. I.e., we have an
3264 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3265 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3266 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3269 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3270 The last mask is illegal since we assume two operands for permute
3271 operation, and the mask element values can't be outside that range.
3272 Hence, the last mask must be converted into {2,5,5,5}.
3273 For the first two permutations we need the first and the second input
3274 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3275 we need the second and the third vectors: {b1,c1,a2,b2} and
3276 {c2,a3,b3,c3}. */
3279 scalar_index = 0;
3280 index = 0;
3281 vect_stmts_counter = 0;
3282 vec_index = 0;
3283 first_vec_index = vec_index++;
3284 if (only_one_vec)
3285 second_vec_index = first_vec_index;
3286 else
3287 second_vec_index = vec_index++;
3289 for (j = 0; j < unroll_factor; j++)
3291 for (k = 0; k < group_size; k++)
3293 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3294 first_mask_element = i + j * group_size;
3295 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3296 nunits, only_one_vec, index,
3297 mask, &current_mask_element,
3298 &need_next_vector,
3299 &number_of_mask_fixes, &mask_fixed,
3300 &needs_first_vector))
3301 return false;
3302 gcc_assert (current_mask_element >= 0
3303 && current_mask_element < 2 * nunits);
3304 mask[index++] = current_mask_element;
3306 if (index == nunits)
3308 index = 0;
3309 if (!can_vec_perm_p (mode, false, mask))
3311 if (dump_enabled_p ())
3313 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3314 vect_location,
3315 "unsupported vect permute { ");
3316 for (i = 0; i < nunits; ++i)
3317 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3318 mask[i]);
3319 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3321 return false;
3324 if (!analyze_only)
3326 int l;
3327 tree mask_vec, *mask_elts;
3328 mask_elts = XALLOCAVEC (tree, nunits);
3329 for (l = 0; l < nunits; ++l)
3330 mask_elts[l] = build_int_cst (mask_element_type,
3331 mask[l]);
3332 mask_vec = build_vector (mask_type, mask_elts);
3334 if (need_next_vector)
3336 first_vec_index = second_vec_index;
3337 second_vec_index = vec_index;
3340 next_scalar_stmt
3341 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3343 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3344 mask_vec, first_vec_index, second_vec_index,
3345 gsi, node, vectype, dr_chain,
3346 ncopies, vect_stmts_counter++);
3353 return true;
3358 /* Vectorize SLP instance tree in postorder. */
3360 static bool
3361 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3362 unsigned int vectorization_factor)
3364 gimple stmt;
3365 bool grouped_store, is_store;
3366 gimple_stmt_iterator si;
3367 stmt_vec_info stmt_info;
3368 unsigned int vec_stmts_size, nunits, group_size;
3369 tree vectype;
3370 int i;
3371 slp_tree child;
3373 if (!node)
3374 return false;
3376 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3377 vect_schedule_slp_instance (child, instance, vectorization_factor);
3379 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3380 stmt_info = vinfo_for_stmt (stmt);
3382 /* VECTYPE is the type of the destination. */
3383 vectype = STMT_VINFO_VECTYPE (stmt_info);
3384 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3385 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3387 /* For each SLP instance calculate the number of vector stmts to be created
3388 for the scalar stmts in each node of the SLP tree. Number of vector
3389 elements in one vector iteration is the number of scalar elements in
3390 one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
3391 size.
3392 Unless this is an SLP reduction, in which case the number of vector
3393 stmts is equal to the number of vector stmts of the children. */
3394 if (GROUP_FIRST_ELEMENT (stmt_info)
3395 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
3396 vec_stmts_size = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
3397 else
3398 vec_stmts_size = (vectorization_factor * group_size) / nunits;
3400 if (!SLP_TREE_VEC_STMTS (node).exists ())
3402 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3403 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3406 if (dump_enabled_p ())
3408 dump_printf_loc (MSG_NOTE,vect_location,
3409 "------>vectorizing SLP node starting from: ");
3410 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3411 dump_printf (MSG_NOTE, "\n");
3414 /* Vectorized stmts go before the last scalar stmt which is where
3415 all uses are ready. */
3416 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3418 /* Mark the first element of the reduction chain as reduction to properly
3419 transform the node. In the analysis phase only the last element of the
3420 chain is marked as reduction. */
3421 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3422 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3424 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3425 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3428 /* Handle two-operation SLP nodes by vectorizing the group with
3429 both operations and then performing a merge. */
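/* As an illustration (addsub-style group, not from a testcase):

     a[0] = b[0] + c[0];
     a[1] = b[1] - c[1];

   is vectorized once with '+' and once with '-', and the two vector
   results are combined by a VEC_PERM_EXPR whose constant mask selects
   lane 0 from the '+' result and lane 1 from the '-' result.  */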
3430 if (SLP_TREE_TWO_OPERATORS (node))
3432 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3433 enum tree_code ocode;
3434 gimple ostmt;
3435 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3436 bool allsame = true;
3437 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3438 if (gimple_assign_rhs_code (ostmt) != code0)
3440 mask[i] = 1;
3441 allsame = false;
3442 ocode = gimple_assign_rhs_code (ostmt);
3444 else
3445 mask[i] = 0;
3446 if (!allsame)
3448 vec<gimple> v0;
3449 vec<gimple> v1;
3450 unsigned j;
3451 tree tmask = NULL_TREE;
3452 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3453 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3454 SLP_TREE_VEC_STMTS (node).truncate (0);
3455 gimple_assign_set_rhs_code (stmt, ocode);
3456 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3457 gimple_assign_set_rhs_code (stmt, code0);
3458 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3459 SLP_TREE_VEC_STMTS (node).truncate (0);
3460 tree meltype = build_nonstandard_integer_type
3461 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3462 tree mvectype = get_same_sized_vectype (meltype, vectype);
3463 unsigned k = 0, l;
3464 for (j = 0; j < v0.length (); ++j)
3466 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3467 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3469 if (k >= group_size)
3470 k = 0;
3471 melts[l] = build_int_cst
3472 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3474 tmask = build_vector (mvectype, melts);
3476 /* ??? Not all targets support a VEC_PERM_EXPR with a
3477 constant mask that would translate to a vec_merge RTX
3478 (with their vec_perm_const_ok). We can either not
3479 vectorize in that case or let veclower do its job.
3480 Unfortunately that isn't too great and at least for
3481 plus/minus we'd eventually like to match targets
3482 vector addsub instructions. */
3483 gimple vstmt;
3484 vstmt = gimple_build_assign (make_ssa_name (vectype),
3485 VEC_PERM_EXPR,
3486 gimple_assign_lhs (v0[j]),
3487 gimple_assign_lhs (v1[j]), tmask);
3488 vect_finish_stmt_generation (stmt, vstmt, &si);
3489 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3491 v0.release ();
3492 v1.release ();
3493 return false;
3496 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3497 return is_store;
3500 /* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
3501 For loop vectorization this is done in vectorizable_call, but for SLP
3502 it needs to be deferred until the end of vect_schedule_slp, because multiple
3503 SLP instances may refer to the same scalar stmt. */
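/* E.g. (illustrative): a vectorized scalar call _5 = sqrtf (x_3) is
   replaced by _5 = 0.0f here; the dead assignment can then be removed
   by DCE once no scalar uses of _5 remain.  */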
3505 static void
3506 vect_remove_slp_scalar_calls (slp_tree node)
3508 gimple stmt, new_stmt;
3509 gimple_stmt_iterator gsi;
3510 int i;
3511 slp_tree child;
3512 tree lhs;
3513 stmt_vec_info stmt_info;
3515 if (!node)
3516 return;
3518 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3519 vect_remove_slp_scalar_calls (child);
3521 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3523 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3524 continue;
3525 stmt_info = vinfo_for_stmt (stmt);
3526 if (stmt_info == NULL
3527 || is_pattern_stmt_p (stmt_info)
3528 || !PURE_SLP_STMT (stmt_info))
3529 continue;
3530 lhs = gimple_call_lhs (stmt);
3531 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3532 set_vinfo_for_stmt (new_stmt, stmt_info);
3533 set_vinfo_for_stmt (stmt, NULL);
3534 STMT_VINFO_STMT (stmt_info) = new_stmt;
3535 gsi = gsi_for_stmt (stmt);
3536 gsi_replace (&gsi, new_stmt, false);
3537 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3541 /* Generate vector code for all SLP instances in the loop/basic block. */
3543 bool
3544 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3546 vec<slp_instance> slp_instances;
3547 slp_instance instance;
3548 unsigned int i, vf;
3549 bool is_store = false;
3551 if (loop_vinfo)
3553 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3554 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3556 else
3558 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3559 vf = 1;
3562 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3564 /* Schedule the tree of INSTANCE. */
3565 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3566 instance, vf);
3567 if (dump_enabled_p ())
3568 dump_printf_loc (MSG_NOTE, vect_location,
3569 "vectorizing stmts using SLP.\n");
3572 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3574 slp_tree root = SLP_INSTANCE_TREE (instance);
3575 gimple store;
3576 unsigned int j;
3577 gimple_stmt_iterator gsi;
3579 /* Remove scalar call stmts. Do not do this for basic-block
3580 vectorization as not all uses may be vectorized.
3581 ??? Why should this be necessary? DCE should be able to
3582 remove the stmts by itself.
3583 ??? For BB vectorization we can as well remove scalar
3584 stmts starting from the SLP tree root if they have no
3585 uses. */
3586 if (loop_vinfo)
3587 vect_remove_slp_scalar_calls (root);
3589 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3590 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3592 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3593 break;
3595 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3596 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3597 /* Free the attached stmt_vec_info and remove the stmt. */
3598 gsi = gsi_for_stmt (store);
3599 unlink_stmt_vdef (store);
3600 gsi_remove (&gsi, true);
3601 release_defs (store);
3602 free_stmt_vec_info (store);
3606 return is_store;
3610 /* Vectorize the basic block. */
3612 void
3613 vect_slp_transform_bb (basic_block bb)
3615 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3616 gimple_stmt_iterator si;
3618 gcc_assert (bb_vinfo);
3620 if (dump_enabled_p ())
3621 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3623 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3625 gimple stmt = gsi_stmt (si);
3626 stmt_vec_info stmt_info;
3628 if (dump_enabled_p ())
3630 dump_printf_loc (MSG_NOTE, vect_location,
3631 "------>SLPing statement: ");
3632 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3633 dump_printf (MSG_NOTE, "\n");
3636 stmt_info = vinfo_for_stmt (stmt);
3637 gcc_assert (stmt_info);
3639 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3640 if (STMT_SLP_TYPE (stmt_info))
3642 vect_schedule_slp (NULL, bb_vinfo);
3643 break;
3647 if (dump_enabled_p ())
3648 dump_printf_loc (MSG_NOTE, vect_location,
3649 "BASIC BLOCK VECTORIZED\n");
3651 destroy_bb_vec_info (bb_vinfo);