/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "backend.h"
26 #include "target.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "gimple.h"
30 #include "tree-pass.h"
31 #include "ssa.h"
32 #include "optabs-tree.h"
33 #include "insn-config.h"
34 #include "recog.h" /* FIXME: for insn_data */
35 #include "params.h"
36 #include "fold-const.h"
37 #include "stor-layout.h"
38 #include "gimple-iterator.h"
39 #include "cfgloop.h"
40 #include "tree-vectorizer.h"
41 #include "langhooks.h"
42 #include "gimple-walk.h"
43 #include "dbgcnt.h"
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple *> scalar_stmts)
{
  slp_tree node;
  gimple *stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
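      /* A COND_EXPR gets one extra child: its embedded comparison is
         handled as two separate operands later on (see
         vect_get_and_check_slp_defs), so e.g. x = a < b ? c : d has
         four children, for a, b, c and d.  */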
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;
  SLP_TREE_DEF_TYPE (node) = vect_internal_def;

  return node;
}
/* This structure is used in creation of an SLP tree.  Each instance
   corresponds to the same operand in a group of scalar stmts in an SLP
   node.  */
typedef struct _slp_oprnd_info
{
  /* Def-stmts for the operands.  */
  vec<gimple *> def_stmts;
  /* Information about the first statement, its vector def-type, type, the
     operand itself in case it's constant, and an indication if it's a pattern
     stmt.  */
  enum vect_def_type first_dt;
  tree first_op_type;
  bool first_pattern;
  bool second_pattern;
} *slp_oprnd_info;
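/* For GROUP_SIZE scalar stmts with NOPS operands each there are NOPS of
   these entries; entry I collects in DEF_STMTS the defining statement of
   operand I of every scalar stmt in the group.  */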
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)
{
  gimple *next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
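/* For example, in a chain loading a[0], a[1] and a[3] (GROUP_GAP 1 for
   a[1] and 2 for a[3]), the returned places are 0, 1 and 3 respectively:
   each step of the walk adds the gap from the previous chain member.  */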
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */

static int
vect_get_and_check_slp_defs (vec_info *vinfo,
                             gimple *stmt, unsigned stmt_num,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  gimple *def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;
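  /* For a GIMPLE_ASSIGN the scalar operands start at gimple op index 1
     (op 0 is the lhs); for a GIMPLE_CALL the arguments start at op index 3
     (ops 0-2 hold the lhs, the callee and the static chain).  */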
  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR
          && COMPARISON_CLASS_P (gimple_assign_rhs1 (stmt)))
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;
  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, vinfo, &def_stmt, &dt))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't analyze def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && vect_stmt_in_region_p (vinfo, def_stmt)
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern
              /* Allow different pattern state for the defs of the
                 first stmt in reduction chains.  */
              && (oprnd_info->first_dt != vect_reduction_def
                  || (!second && !oprnd_info->second_pattern)))
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
            case GIMPLE_ASSIGN:
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return -1;
            }
        }

      if (second)
        oprnd_info->second_pattern = pattern;

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
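/* Note that a needed swap is applied to the GIMPLE stmt itself (or to the
   embedded comparison of a COND_EXPR) rather than merely recorded, so that
   later operand accesses by index, e.g. in vect_get_slp_defs, see the
   canonical operand order.  */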
/* Verify that the scalar stmts STMTS are isomorphic, do not require any
   data permutation and are of supported types of operation.  Return true
   if so; otherwise return false and indicate in *MATCHES which stmts are
   not isomorphic to the first one.  If MATCHES[0] is false then this
   indicates that the comparison could not be carried out or the stmts
   will never be vectorized by SLP.  */
static bool
vect_build_slp_tree_1 (vec_info *vinfo,
                       vec<gimple *> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       bool *matches, bool *two_operators)
{
  unsigned int i;
  gimple *first_stmt = stmts[0], *stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype = NULL_TREE, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  HOST_WIDE_INT dummy;
  gimple *first_load = NULL, *prev_first_load = NULL;
  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* If populating the vector type requires unrolling then fail
         before adjusting *max_nunits for basic-block vectorization.  */
      if (is_a <bb_vec_info> (vinfo)
          && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unrolling required "
                           "in basic block SLP\n");
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && alt_stmt_code == ERROR_MARK)
            alt_stmt_code = rhs_code;
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              /* Handle mismatches in plus/minus by computing both
                 and merging the results.  */
              && !((first_stmt_code == PLUS_EXPR
                    || first_stmt_code == MINUS_EXPR)
                   && (alt_stmt_code == PLUS_EXPR
                       || alt_stmt_code == MINUS_EXPR)
                   && rhs_code == alt_stmt_code)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "original stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    first_stmt, 0);
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple *first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && TREE_CODE_CLASS (rhs_code) != tcc_expression
              && TREE_CODE_CLASS (rhs_code) != tcc_comparison
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;
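  /* For example, with V4SI vectors and a group of two stmts {PLUS, MINUS},
     the mask built below is {0, 5, 2, 7}: even elements are taken from the
     vector of PLUS results and odd elements from the vector of MINUS results
     (indices >= nunits select from the second permute input).  */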
  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
        = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
        {
          sel[i] = i;
          if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
            sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
        }
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          for (i = 0; i < group_size; ++i)
            if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
              {
                matches[i] = false;
                if (dump_enabled_p ())
                  {
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different operation "
                                     "in stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      stmts[i], 0);
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "original stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      first_stmt, 0);
                  }
              }
          return false;
        }
      *two_operators = true;
    }

  return true;
}
/* Recursively build an SLP tree starting from NODE.
   Fail (and return false) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise return
   true, with *MATCHES recording which scalar stmts of the group failed
   to match.  */
static bool
vect_build_slp_tree (vec_info *vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple *stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, matches, &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }
  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (vinfo, stmt, i, &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (vect_build_slp_tree (vinfo, &child,
                               group_size, max_nunits, loads, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          /* If we have all children of child built up from scalars then just
             throw that away and build it up this node from scalars.  */
          if (!SLP_TREE_CHILDREN (child).is_empty ())
            {
              unsigned int j;
              slp_tree grandchild;

              FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)
                  break;
              if (!grandchild)
                {
                  /* Roll back.  */
                  *max_nunits = old_max_nunits;
                  loads->truncate (old_nloads);
                  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                    vect_free_slp_tree (grandchild);
                  SLP_TREE_CHILDREN (child).truncate (0);

                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "Building parent vector operands from "
                                   "scalars instead\n");
                  oprnd_info->def_stmts = vNULL;
                  SLP_TREE_DEF_TYPE (child) = vect_external_def;
                  SLP_TREE_CHILDREN (*node).quick_push (child);
                  continue;
                }
            }

          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ???  This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size...  */
      if (is_a <bb_vec_info> (vinfo)
          && !matches[0]
          /* ???  Rejecting patterns this way doesn't work.  We'd have to
             do extra work to cancel the pattern so the uses see the
             scalar version.  */
          && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          dump_printf_loc (MSG_NOTE, vect_location,
                           "Building vector operands from scalars\n");
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_DEF_TYPE (child) = vect_external_def;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commuted try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          && !SLP_TREE_TWO_OPERATORS (*node)
          /* Do so only if the number of not successful permutes was
             no more than a cut-off as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                std::swap (oprnds_info[0]->def_stmts[j],
                           oprnds_info[1]->def_stmts[j]);
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again with scratch 'matches' ...  */
          bool *tem = XALLOCAVEC (bool, group_size);
          if (vect_build_slp_tree (vinfo, &child,
                                   group_size, max_nunits, loads,
                                   tem, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              /* ... so if successful we can apply the operand swapping
                 to the GIMPLE IL.  This is necessary because for example
                 vect_get_slp_defs uses operand indexes and thus expects
                 canonical operand order.  This is also necessary even
                 if we end up building the operand from scalars as
                 we'll continue to process swapped operand two.  */
              for (j = 0; j < group_size; ++j)
                {
                  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                  gimple_set_plf (stmt, GF_PLF_1, false);
                }
              for (j = 0; j < group_size; ++j)
                {
                  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                  if (!matches[j])
                    {
                      /* Avoid swapping operands twice.  */
                      if (gimple_plf (stmt, GF_PLF_1))
                        continue;
                      swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                         gimple_assign_rhs2_ptr (stmt));
                      gimple_set_plf (stmt, GF_PLF_1, true);
                    }
                }
              /* Verify we swap all duplicates or none.  */
              if (flag_checking)
                for (j = 0; j < group_size; ++j)
                  {
                    gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                    gcc_assert (gimple_plf (stmt, GF_PLF_1) == ! matches[j]);
                  }

              /* If we have all children of child built up from scalars then
                 just throw that away and build it up this node from scalars.  */
              if (!SLP_TREE_CHILDREN (child).is_empty ())
                {
                  unsigned int j;
                  slp_tree grandchild;

                  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                    if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)
                      break;
                  if (!grandchild)
                    {
                      /* Roll back.  */
                      *max_nunits = old_max_nunits;
                      loads->truncate (old_nloads);
                      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                        vect_free_slp_tree (grandchild);
                      SLP_TREE_CHILDREN (child).truncate (0);

                      dump_printf_loc (MSG_NOTE, vect_location,
                                       "Building parent vector operands from "
                                       "scalars instead\n");
                      oprnd_info->def_stmts = vNULL;
                      SLP_TREE_DEF_TYPE (child) = vect_external_def;
                      SLP_TREE_CHILDREN (*node).quick_push (child);
                      continue;
                    }
                }

              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}
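/* Note the retry protocol above: a failed child build with MATCHES[0] still
   true means only some lanes mismatched, so for a commutative operation the
   operands of the mismatched lanes are swapped and the build is re-tried,
   with *NPERMUTES bounding the number of such retries over the whole tree.  */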
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, location_t loc, slp_tree node)
{
  int i;
  gimple *stmt;
  slp_tree child;

  dump_printf_loc (dump_kind, loc, "node%s\n",
                   SLP_TREE_DEF_TYPE (node) != vect_internal_def
                   ? " (external)" : "");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf_loc (dump_kind, loc, "\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, loc, child);
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple *stmt;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple *stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple *stmt;
  vec<gimple *> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
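/* For example, PERMUTATION {1, 2, 0} moves stmt 0 to slot 1, stmt 1 to
   slot 2 and stmt 2 to slot 0, since tmp_stmts[permutation[i]] receives
   the stmt currently at index i.  */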
/* Attempt to reorder stmts in a reduction chain so that we don't
   require any load permutation.  Return true if that was possible,
   otherwise return false.  */

static bool
vect_attempt_slp_rearrange_stmts (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j;
  sbitmap load_index;
  unsigned int lidx;
  slp_tree node, load;

  /* Compare all the permutation sequences to the first one.  We know
     that at least one load is permuted.  */
  node = SLP_INSTANCE_LOADS (slp_instn)[0];
  if (!node->load_permutation.exists ())
    return false;
  for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
    {
      if (!load->load_permutation.exists ())
        return false;
      FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
        if (lidx != node->load_permutation[j])
          return false;
    }

  /* Check that the loads in the first sequence are different and there
     are no gaps between them.  */
  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
    {
      if (lidx >= group_size)
        {
          sbitmap_free (load_index);
          return false;
        }
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  /* This permutation is valid for reduction.  Since the order of the
     statements in the nodes is not important unless they are memory
     accesses, we can rearrange the statements in all the nodes
     according to the order of the loads.  */
  vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                            node->load_permutation);

  /* We are done, no actual permutations need to be generated.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    SLP_TREE_LOAD_PERMUTATION (node).release ();
  return true;
}
/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  slp_tree node;
  gimple *stmt, *load, *next_load;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is not important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (vect_attempt_slp_rearrange_stmts (slp_instn))
        return true;

      /* Fallthru to general load permutation handling.  */
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
         no permutation is necessary.  */
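      /* E.g. loads of a[1] and a[2] out of a chain a[0] .. a[3] form a
         subchain: consecutive chain members (GROUP_GAP 1) in unchanged
         order, so the vector load can simply start at the right offset.  */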
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
            continue;
          bool subchain_p = true;
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0
                  && (next_load != load
                      || GROUP_GAP (vinfo_for_stmt (load)) != 1))
                {
                  subchain_p = false;
                  break;
                }
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
          if (subchain_p)
            SLP_TREE_LOAD_PERMUTATION (node).release ();
          else
            {
              /* Verify the permutation can be generated.  */
              vec<tree> tem;
              if (!vect_transform_slp_perm_load (node, tem, NULL,
                                                 1, slp_instn, true))
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                   vect_location,
                                   "unsupported load permutation\n");
                  return false;
                }
            }
        }
      return true;
    }

  /* For loop vectorization verify we can generate the permutation.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;

  return true;
}
/* Find the last scalar stmt in the SLP tree NODE.  */

gimple *
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple *last = NULL, *stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
        last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
        last = get_later_stmt (stmt, last);
    }

  return last;
}
/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         stmt_vector_for_cost *body_cost_vec,
                         unsigned ncopies_for_cost)
{
  unsigned i, j;
  slp_tree child;
  gimple *stmt;
  stmt_vec_info stmt_info;
  tree lhs;

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
      vect_analyze_slp_cost_1 (instance, child, prologue_cost_vec,
                               body_cost_vec, ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
            {
              /* If the load is permuted then the alignment is determined by
                 the first group element not by the first scalar stmt DR.  */
              stmt = GROUP_FIRST_ELEMENT (stmt_info);
              stmt_info = vinfo_for_stmt (stmt);
              /* Record the cost for the permutation.  */
              record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                                stmt_info, 0, vect_body);
              /* And adjust the number of loads performed.  */
              unsigned nunits
                = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
              ncopies_for_cost
                = (GROUP_SIZE (stmt_info) - GROUP_GAP (stmt_info)
                   + nunits - 1) / nunits;
              ncopies_for_cost *= SLP_INSTANCE_UNROLLING_FACTOR (instance);
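              /* E.g. a permuted group with GROUP_SIZE 7, GROUP_GAP 1 and
                 V4SI vectors needs (7 - 1 + 3) / 4 == 2 vector loads per
                 unrolled copy.  */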
            }
          /* Record the cost for the vector loads.  */
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
        }
      return;
    }

  record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                    stmt_info, 0, vect_body);
  if (SLP_TREE_TWO_OPERATORS (node))
    {
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                        stmt_info, 0, vect_body);
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                        stmt_info, 0, vect_body);
    }

  /* Push SLP node def-type to stmts.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      gimple *def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, stmt_info->vinfo, &def_stmt, &dt))
        {
          /* Without looking at the actual initializer a vector of
             constants can be implemented as load from the constant pool.
             ???  We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          if (dt == vect_constant_def)
            record_stmt_cost (prologue_cost_vec, 1, vector_load,
                              stmt_info, 0, vect_prologue);
          else if (dt == vect_external_def)
            record_stmt_cost (prologue_cost_vec, 1, vec_construct,
                              stmt_info, 0, vect_prologue);
        }
    }

  /* Restore stmt def-types.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;
}
/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (slp_instance instance, void *data)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_slp_cost ===\n");

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  slp_tree node = SLP_INSTANCE_TREE (instance);
  stmt_vec_info stmt_info = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
  /* Adjust the group_size by the vectorization factor which is always one
     for basic-block vectorization.  */
  if (STMT_VINFO_LOOP_VINFO (stmt_info))
    group_size *= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  /* For reductions look at a reduction operand in case the reduction
     operation is widening like DOT_PROD or SAD.  */
  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      switch (gimple_assign_rhs_code (stmt))
        {
        case DOT_PROD_EXPR:
        case SAD_EXPR:
          nunits = TYPE_VECTOR_SUBPARTS (get_vectype_for_scalar_type
                                 (TREE_TYPE (gimple_assign_rhs1 (stmt))));
          break;
        default:;
        }
    }
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
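  /* E.g. a group of 6 scalar stmts vectorized with V4SI (nunits 4) gives
     lcm (4, 6) / 4 == 3 vector stmts per group copy.  */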
  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  vect_analyze_slp_cost_1 (instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, &body_cost_vec,
                           ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  */
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  /* Record the instance's instructions in the target cost model.  */
  FOR_EACH_VEC_ELT (body_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_body);
    }

  prologue_cost_vec.release ();
  body_cost_vec.release ();
}
/* Splits a group of stores, currently beginning at FIRST_STMT, into two groups:
   one (still beginning at FIRST_STMT) of size GROUP1_SIZE (also containing
   the first GROUP1_SIZE stmts, since stores are consecutive), the second
   containing the remainder.
   Return the first stmt in the second group.  */
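/* For example, splitting a group of 8 consecutive stores a[0] .. a[7] with
   GROUP1_SIZE 4 leaves a[0] .. a[3] in the first group and makes the store
   to a[4] the head of the second; the GROUP_GAP updates below make each
   group skip over the other group's elements.  */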
static gimple *
vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
{
  stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
  gcc_assert (GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
  gcc_assert (group1_size > 0);
  int group2_size = GROUP_SIZE (first_vinfo) - group1_size;
  gcc_assert (group2_size > 0);
  GROUP_SIZE (first_vinfo) = group1_size;

  gimple *stmt = first_stmt;
  for (unsigned i = group1_size; i > 1; i--)
    {
      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
    }
  /* STMT is now the last element of the first group.  */
  gimple *group2 = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
  GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;

  GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
  for (stmt = group2; stmt; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
    {
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
    }

  /* For the second group, the GROUP_GAP is that before the original group,
     plus skipping over the first vector.  */
  GROUP_GAP (vinfo_for_stmt (group2))
    = GROUP_GAP (first_vinfo) + group1_size;

  /* GROUP_GAP of the first group now has to skip over the second group too.  */
  GROUP_GAP (first_vinfo) += group2_size;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
                     group1_size, group2_size);

  return group2;
}
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (vec_info *vinfo,
                           gimple *stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple *next;
  unsigned int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple *> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (is_a <loop_vec_info> (vinfo));
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (is_a <loop_vec_info> (vinfo));
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = as_a <loop_vec_info> (vinfo)->reductions.length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
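  /* E.g. group_size 3 with V4SI (nunits 4) gives lcm (4, 3) / 3 == 4:
     the group has to be unrolled four times to fill whole vectors.  */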
  if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
      /* Mark the first element of the reduction chain as reduction to properly
         transform the node.  In the reduction analysis phase only the last
         element of the chain is marked as reduction.  */
      if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple *> reductions = as_a <loop_vec_info> (vinfo)->reductions;
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (vinfo, &node, group_size,
                           &max_nunits, &loads,
                           matches, &npermutes, NULL, max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple *load, *first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted
              /* The load requires permutation when unrolling exposes
                 a gap either because the group is larger than the SLP
                 group-size or because there is a gap between the groups.  */
              && (unrolling_factor == 1
                  || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
                      && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }
        }

      vinfo->slp_instances.safe_push (new_instance);

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Final SLP tree for instance:\n");
          vect_print_slp_tree (MSG_NOTE, vect_location, node);
        }

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();
  /* For basic block SLP, try to break the group up into multiples of the
     vector size.  */
  if (is_a <bb_vec_info> (vinfo)
      && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
      && STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
    {
      /* We consider breaking the group only on VF boundaries from the existing
         start.  */
      for (i = 0; i < group_size; i++)
        if (!matches[i]) break;

      if (i >= nunits && i < group_size)
        {
          /* Split into two groups at the first vector boundary before i.  */
          gcc_assert ((nunits & (nunits - 1)) == 0);
          unsigned group1_size = i & ~(nunits - 1);
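          /* E.g. with nunits == 4 and the first mismatch at i == 6,
             group1_size becomes 4: the first whole vector is kept and
             re-analysis restarts at the vector containing the mismatch.  */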
          gimple *rest = vect_split_slp_store_group (stmt, group1_size);
          bool res = vect_analyze_slp_instance (vinfo, stmt, max_tree_size);
          /* If the first non-match was in the middle of a vector,
             skip the rest of that vector.  */
          if (group1_size < i)
            {
              i = group1_size + nunits;
              if (i < group_size)
                rest = vect_split_slp_store_group (rest, nunits);
            }
          if (i < group_size)
            res |= vect_analyze_slp_instance (vinfo, rest, max_tree_size);
          return res;
        }
      /* Even though the first vector did not all match, we might be able to SLP
         (some) of the remainder.  FORNOW ignore this possibility.  */
    }

  return false;
}
/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)
{
  unsigned int i;
  gimple *first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (vinfo, first_element, max_tree_size))
      ok = true;

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      if (loop_vinfo->reduction_chains.length () > 0)
        {
          /* Find SLP sequences starting from reduction chains.  */
          FOR_EACH_VEC_ELT (loop_vinfo->reduction_chains, i, first_element)
            if (vect_analyze_slp_instance (vinfo, first_element,
                                           max_tree_size))
              ok = true;
            else
              return false;

          /* Don't try to vectorize SLP reductions if reduction chain was
             detected.  */
          return ok;
        }

      /* Find SLP sequences starting from groups of reductions.  */
      if (loop_vinfo->reductions.length () > 1
          && vect_analyze_slp_instance (vinfo, loop_vinfo->reductions[0],
                                        max_tree_size))
        ok = true;
    }

  return true;
}
/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances.  Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}
1965 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1966 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
1968 static void
1969 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1971 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1972 imm_use_iterator imm_iter;
1973 gimple *use_stmt;
1974 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1975 slp_tree child;
1976 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1977 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1978 int j;
1980 /* Propagate hybrid down the SLP tree. */
1981 if (stype == hybrid)
1982 ;
1983 else if (HYBRID_SLP_STMT (stmt_vinfo))
1984 stype = hybrid;
1985 else
1987 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
1988 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
1989 /* We always get the pattern stmt here, but for immediate
1990 uses we have to use the LHS of the original stmt. */
1991 gcc_checking_assert (!STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
1992 if (STMT_VINFO_RELATED_STMT (stmt_vinfo))
1993 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
1994 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
1995 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
1997 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1998 continue;
1999 use_vinfo = vinfo_for_stmt (use_stmt);
2000 if (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2001 && STMT_VINFO_RELATED_STMT (use_vinfo))
2002 use_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (use_vinfo));
2003 if (!STMT_SLP_TYPE (use_vinfo)
2004 && (STMT_VINFO_RELEVANT (use_vinfo)
2005 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo)))
2006 && !(gimple_code (use_stmt) == GIMPLE_PHI
2007 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2009 if (dump_enabled_p ())
2011 dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
2012 "def in non-SLP stmt: ");
2013 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0);
2015 stype = hybrid;
2020 if (stype == hybrid
2021 && !HYBRID_SLP_STMT (stmt_vinfo))
2023 if (dump_enabled_p ())
2025 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2026 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2028 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2031 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2032 if (SLP_TREE_DEF_TYPE (child) != vect_external_def)
2033 vect_detect_hybrid_slp_stmts (child, i, stype);
2036 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2038 static tree
2039 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2041 walk_stmt_info *wi = (walk_stmt_info *)data;
2042 struct loop *loopp = (struct loop *)wi->info;
2044 if (wi->is_lhs)
2045 return NULL_TREE;
2047 if (TREE_CODE (*tp) == SSA_NAME
2048 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2050 gimple *def_stmt = SSA_NAME_DEF_STMT (*tp);
2051 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2052 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2054 if (dump_enabled_p ())
2056 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2057 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2059 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2063 return NULL_TREE;
2066 static tree
2067 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2068 walk_stmt_info *)
2070 /* If the stmt is in an SLP instance then this isn't a reason
2071 to mark use definitions in other SLP instances as hybrid. */
2072 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2073 *handled = true;
2074 return NULL_TREE;
2077 /* Find stmts that must be both vectorized and SLPed. */
2079 void
2080 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2082 unsigned int i;
2083 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2084 slp_instance instance;
2086 if (dump_enabled_p ())
2087 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2088 "\n");
2090 /* First walk all pattern stmts in the loop and mark the defs of their
2091 uses as hybrid, because immediate uses in pattern stmts are not recorded. */
2092 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2094 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2095 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2096 gsi_next (&gsi))
2098 gimple *stmt = gsi_stmt (gsi);
2099 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2100 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2102 walk_stmt_info wi;
2103 memset (&wi, 0, sizeof (wi));
2104 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2105 gimple_stmt_iterator gsi2
2106 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2107 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2108 vect_detect_hybrid_slp_1, &wi);
2109 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2110 vect_detect_hybrid_slp_2,
2111 vect_detect_hybrid_slp_1, &wi);
2116 /* Then walk the SLP instance trees marking stmts with uses in
2117 non-SLP stmts as hybrid, also propagating hybrid down the
2118 SLP tree, collecting the above info on-the-fly. */
2119 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2121 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2122 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2123 i, pure_slp);
2128 /* Create and initialize a new bb_vec_info struct for BB, as well as
2129 stmt_vec_info structs for all the stmts in it. */
2131 static bb_vec_info
2132 new_bb_vec_info (gimple_stmt_iterator region_begin,
2133 gimple_stmt_iterator region_end)
2135 basic_block bb = gsi_bb (region_begin);
2136 bb_vec_info res = NULL;
2137 gimple_stmt_iterator gsi;
2139 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2140 res->kind = vec_info::bb;
2141 BB_VINFO_BB (res) = bb;
2142 res->region_begin = region_begin;
2143 res->region_end = region_end;
2145 for (gsi = region_begin; gsi_stmt (gsi) != gsi_stmt (region_end);
2146 gsi_next (&gsi))
2148 gimple *stmt = gsi_stmt (gsi);
2149 gimple_set_uid (stmt, 0);
2150 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
2153 BB_VINFO_GROUPED_STORES (res).create (10);
2154 BB_VINFO_SLP_INSTANCES (res).create (2);
2155 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2157 bb->aux = res;
2158 return res;
2162 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2163 stmts in the basic block. */
2165 static void
2166 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2168 vec<slp_instance> slp_instances;
2169 slp_instance instance;
2170 basic_block bb;
2171 gimple_stmt_iterator si;
2172 unsigned i;
2174 if (!bb_vinfo)
2175 return;
2177 bb = BB_VINFO_BB (bb_vinfo);
2179 for (si = bb_vinfo->region_begin;
2180 gsi_stmt (si) != gsi_stmt (bb_vinfo->region_end); gsi_next (&si))
2182 gimple *stmt = gsi_stmt (si);
2183 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2185 if (stmt_info)
2186 /* Free stmt_vec_info. */
2187 free_stmt_vec_info (stmt);
2189 /* Reset region marker. */
2190 gimple_set_uid (stmt, -1);
2193 vect_destroy_datarefs (bb_vinfo);
2194 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2195 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2196 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2197 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2198 vect_free_slp_instance (instance);
2199 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2200 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2201 free (bb_vinfo);
2202 bb->aux = NULL;
2206 /* Analyze statements contained in SLP tree node after recursively analyzing
2207 the subtree. Return TRUE if the operations are supported. */
2209 static bool
2210 vect_slp_analyze_node_operations (slp_tree node)
2212 bool dummy;
2213 int i, j;
2214 gimple *stmt;
2215 slp_tree child;
2217 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
2218 return true;
2220 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2221 if (!vect_slp_analyze_node_operations (child))
2222 return false;
2224 /* Push SLP node def-type to stmts. */
2225 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2226 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
2227 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
2228 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);
2230 bool res = true;
2231 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2233 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2234 gcc_assert (stmt_info);
2235 gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
2237 if (!vect_analyze_stmt (stmt, &dummy, node))
2239 res = false;
2240 break;
2244 /* Restore stmt def-types. */
2245 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2246 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
2247 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
2248 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;
2250 return res;
2254 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2255 operations are supported. */
2257 bool
2258 vect_slp_analyze_operations (vec<slp_instance> slp_instances, void *data)
2260 slp_instance instance;
2261 int i;
2263 if (dump_enabled_p ())
2264 dump_printf_loc (MSG_NOTE, vect_location,
2265 "=== vect_slp_analyze_operations ===\n");
2267 for (i = 0; slp_instances.iterate (i, &instance); )
2269 if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
2271 dump_printf_loc (MSG_NOTE, vect_location,
2272 "removing SLP instance operations starting from: ");
2273 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2274 SLP_TREE_SCALAR_STMTS
2275 (SLP_INSTANCE_TREE (instance))[0], 0);
2276 vect_free_slp_instance (instance);
2277 slp_instances.ordered_remove (i);
2279 else
2281 /* Compute the costs of the SLP instance. */
2282 vect_analyze_slp_cost (instance, data);
2283 i++;
2287 if (!slp_instances.length ())
2288 return false;
2290 return true;
2294 /* Compute the scalar cost of the SLP node NODE and its children
2295 and return it. Do not account for defs that are marked in LIFE, and
2296 update LIFE according to the uses of NODE. */
2298 static unsigned
2299 vect_bb_slp_scalar_cost (basic_block bb,
2300 slp_tree node, vec<bool, va_heap> *life)
2302 unsigned scalar_cost = 0;
2303 unsigned i;
2304 gimple *stmt;
2305 slp_tree child;
2307 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2309 unsigned stmt_cost;
2310 ssa_op_iter op_iter;
2311 def_operand_p def_p;
2312 stmt_vec_info stmt_info;
2314 if ((*life)[i])
2315 continue;
2317 /* If there is a non-vectorized use of the defs then the scalar
2318 stmt is kept live in which case we do not account it or any
2319 required defs in the SLP children in the scalar cost. This
2320 way we make the vectorization more costly when compared to
2321 the scalar cost. */
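/* E.g., if the result of a scalar stmt in the node is also used by a
   stmt outside the SLP region, the scalar stmt stays live: its cost is
   not added to the scalar cost, which makes vectorization look
   relatively more expensive. */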
2322 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2324 imm_use_iterator use_iter;
2325 gimple *use_stmt;
2326 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2327 if (!is_gimple_debug (use_stmt)
2328 && (! vect_stmt_in_region_p (vinfo_for_stmt (stmt)->vinfo,
2329 use_stmt)
2330 || ! PURE_SLP_STMT (vinfo_for_stmt (use_stmt))))
2332 (*life)[i] = true;
2333 BREAK_FROM_IMM_USE_STMT (use_iter);
2336 if ((*life)[i])
2337 continue;
2339 stmt_info = vinfo_for_stmt (stmt);
2340 if (STMT_VINFO_DATA_REF (stmt_info))
2342 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2343 stmt_cost = vect_get_stmt_cost (scalar_load);
2344 else
2345 stmt_cost = vect_get_stmt_cost (scalar_store);
2347 else
2348 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2350 scalar_cost += stmt_cost;
2353 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2354 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
2355 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2357 return scalar_cost;
2360 /* Check if vectorization of the basic block is profitable. */
2362 static bool
2363 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2365 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2366 slp_instance instance;
2367 int i;
2368 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2369 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2371 /* Calculate scalar cost. */
2372 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2374 auto_vec<bool, 20> life;
2375 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2376 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2377 SLP_INSTANCE_TREE (instance),
2378 &life);
2381 /* Complete the target-specific cost calculation. */
2382 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2383 &vec_inside_cost, &vec_epilogue_cost);
2385 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2387 if (dump_enabled_p ())
2389 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2390 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2391 vec_inside_cost);
2392 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2393 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2394 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2397 /* Vectorization is profitable if its cost is less than the cost of the
2398 scalar version. Note that we err on the vector side for equal cost because
2399 the cost estimate is otherwise quite pessimistic (constant uses are
2400 free on the scalar side but cost a load on the vector side for
2401 example). */
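/* E.g., an inside cost of 6 plus a prologue cost of 2 against a scalar
   cost of 8 still passes the check below and we vectorize. */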
2402 if (vec_outside_cost + vec_inside_cost > scalar_cost)
2403 return false;
2405 return true;
2408 /* Check if the basic block can be vectorized. Returns a bb_vec_info
2409 if so and sets FATAL to true if failure is independent of
2410 current_vector_size. */
2412 static bb_vec_info
2413 vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
2414 gimple_stmt_iterator region_end,
2415 vec<data_reference_p> datarefs, int n_stmts,
2416 bool &fatal)
2418 bb_vec_info bb_vinfo;
2419 slp_instance instance;
2420 int i;
2421 int min_vf = 2;
2423 /* The first group of checks is independent of the vector size. */
2424 fatal = true;
2426 if (n_stmts > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2428 if (dump_enabled_p ())
2429 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2430 "not vectorized: too many instructions in "
2431 "basic block.\n");
2432 free_data_refs (datarefs);
2433 return NULL;
2436 bb_vinfo = new_bb_vec_info (region_begin, region_end);
2437 if (!bb_vinfo)
2438 return NULL;
2440 BB_VINFO_DATAREFS (bb_vinfo) = datarefs;
2442 /* Analyze the data references. */
2444 if (!vect_analyze_data_refs (bb_vinfo, &min_vf))
2446 if (dump_enabled_p ())
2447 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2448 "not vectorized: unhandled data-ref in basic "
2449 "block.\n");
2451 destroy_bb_vec_info (bb_vinfo);
2452 return NULL;
2455 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2457 if (dump_enabled_p ())
2458 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2459 "not vectorized: not enough data-refs in "
2460 "basic block.\n");
2462 destroy_bb_vec_info (bb_vinfo);
2463 return NULL;
2466 if (!vect_analyze_data_ref_accesses (bb_vinfo))
2468 if (dump_enabled_p ())
2469 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2470 "not vectorized: unhandled data access in "
2471 "basic block.\n");
2473 destroy_bb_vec_info (bb_vinfo);
2474 return NULL;
2477 /* If there are no grouped stores in the region there is no need
2478 to continue with pattern recog as vect_analyze_slp will fail
2479 anyway. */
2480 if (bb_vinfo->grouped_stores.is_empty ())
2482 if (dump_enabled_p ())
2483 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2484 "not vectorized: no grouped stores in "
2485 "basic block.\n");
2487 destroy_bb_vec_info (bb_vinfo);
2488 return NULL;
2491 /* The rest of the analysis below depends on the vector size in some way, so failure is no longer fatal. */
2492 fatal = false;
2494 vect_pattern_recog (bb_vinfo);
2496 /* Check the SLP opportunities in the basic block, analyze and build SLP
2497 trees. */
2498 if (!vect_analyze_slp (bb_vinfo, n_stmts))
2500 if (dump_enabled_p ())
2502 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2503 "Failed to SLP the basic block.\n");
2504 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2505 "not vectorized: failed to find SLP opportunities "
2506 "in basic block.\n");
2509 destroy_bb_vec_info (bb_vinfo);
2510 return NULL;
2513 /* Analyze and verify the alignment of data references and the
2514 dependence in the SLP instances. */
2515 for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); )
2517 if (! vect_slp_analyze_and_verify_instance_alignment (instance)
2518 || ! vect_slp_analyze_instance_dependence (instance))
2520 dump_printf_loc (MSG_NOTE, vect_location,
2521 "removing SLP instance operations starting from: ");
2522 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2523 SLP_TREE_SCALAR_STMTS
2524 (SLP_INSTANCE_TREE (instance))[0], 0);
2525 vect_free_slp_instance (instance);
2526 BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
2527 continue;
2530 /* Mark all the statements that we want to vectorize as pure SLP and
2531 relevant. */
2532 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2533 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2535 i++;
2537 if (! BB_VINFO_SLP_INSTANCES (bb_vinfo).length ())
2539 destroy_bb_vec_info (bb_vinfo);
2540 return NULL;
2543 if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo),
2544 BB_VINFO_TARGET_COST_DATA (bb_vinfo)))
2546 if (dump_enabled_p ())
2547 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2548 "not vectorized: bad operation in basic block.\n");
2550 destroy_bb_vec_info (bb_vinfo);
2551 return NULL;
2554 /* Cost model: check if the vectorization is worthwhile. */
2555 if (!unlimited_cost_model (NULL)
2556 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2558 if (dump_enabled_p ())
2559 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2560 "not vectorized: vectorization is not "
2561 "profitable.\n");
2563 destroy_bb_vec_info (bb_vinfo);
2564 return NULL;
2567 if (dump_enabled_p ())
2568 dump_printf_loc (MSG_NOTE, vect_location,
2569 "Basic block will be vectorized using SLP\n");
2571 return bb_vinfo;
2575 /* Main entry for the BB vectorizer. Analyze and transform BB, returns
2576 true if anything in the basic-block was vectorized. */
2578 bool
2579 vect_slp_bb (basic_block bb)
2581 bb_vec_info bb_vinfo;
2582 gimple_stmt_iterator gsi;
2583 unsigned int vector_sizes;
2584 bool any_vectorized = false;
2586 if (dump_enabled_p ())
2587 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2589 /* Autodetect the first vector size we try. */
2590 current_vector_size = 0;
2591 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2593 gsi = gsi_start_bb (bb);
2595 while (1)
2597 if (gsi_end_p (gsi))
2598 break;
2600 gimple_stmt_iterator region_begin = gsi;
2601 vec<data_reference_p> datarefs = vNULL;
2602 int insns = 0;
2604 for (; !gsi_end_p (gsi); gsi_next (&gsi))
2606 gimple *stmt = gsi_stmt (gsi);
2607 if (is_gimple_debug (stmt))
2608 continue;
2609 insns++;
2611 if (gimple_location (stmt) != UNKNOWN_LOCATION)
2612 vect_location = gimple_location (stmt);
2614 if (!find_data_references_in_stmt (NULL, stmt, &datarefs))
2615 break;
2618 /* Skip leading unhandled stmts. */
2619 if (gsi_stmt (region_begin) == gsi_stmt (gsi))
2621 gsi_next (&gsi);
2622 continue;
2625 gimple_stmt_iterator region_end = gsi;
2627 bool vectorized = false;
2628 bool fatal = false;
2629 bb_vinfo = vect_slp_analyze_bb_1 (region_begin, region_end,
2630 datarefs, insns, fatal);
2631 if (bb_vinfo
2632 && dbg_cnt (vect_slp))
2634 if (dump_enabled_p ())
2635 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB part\n");
2637 vect_schedule_slp (bb_vinfo);
2639 if (dump_enabled_p ())
2640 dump_printf_loc (MSG_NOTE, vect_location,
2641 "basic block part vectorized\n");
2643 destroy_bb_vec_info (bb_vinfo);
2645 vectorized = true;
2647 else
2648 destroy_bb_vec_info (bb_vinfo);
2650 any_vectorized |= vectorized;
2652 vector_sizes &= ~current_vector_size;
2653 if (vectorized
2654 || vector_sizes == 0
2655 || current_vector_size == 0
2656 /* If vect_slp_analyze_bb_1 signaled that analysis for all
2657 vector sizes will fail, do not bother iterating. */
2658 || fatal)
2660 if (gsi_end_p (region_end))
2661 break;
2663 /* Skip the unhandled stmt. */
2664 gsi_next (&gsi);
2666 /* And reset vector sizes. */
2667 current_vector_size = 0;
2668 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2670 else
2672 /* Try the next biggest vector size. */
2673 current_vector_size = 1 << floor_log2 (vector_sizes);
2674 if (dump_enabled_p ())
2675 dump_printf_loc (MSG_NOTE, vect_location,
2676 "***** Re-trying analysis with "
2677 "vector size %d\n", current_vector_size);
2679 /* Start over. */
2680 gsi = region_begin;
2684 return any_vectorized;
2688 /* Return true if the vector type of the boolean constant that is the
2689 OPNUM operand in statement STMT is a boolean vector. */
2691 static bool
2692 vect_mask_constant_operand_p (gimple *stmt, int opnum)
2694 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2695 enum tree_code code = gimple_expr_code (stmt);
2696 tree op, vectype;
2697 gimple *def_stmt;
2698 enum vect_def_type dt;
2700 /* For comparisons and COND_EXPR the type is chosen depending
2701 on the other comparison operand. */
2702 if (TREE_CODE_CLASS (code) == tcc_comparison)
2704 if (opnum)
2705 op = gimple_assign_rhs1 (stmt);
2706 else
2707 op = gimple_assign_rhs2 (stmt);
2709 if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
2710 &dt, &vectype))
2711 gcc_unreachable ();
2713 return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
2716 if (code == COND_EXPR)
2718 tree cond = gimple_assign_rhs1 (stmt);
2720 if (TREE_CODE (cond) == SSA_NAME)
2721 return false;
2723 if (opnum)
2724 op = TREE_OPERAND (cond, 1);
2725 else
2726 op = TREE_OPERAND (cond, 0);
2728 if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
2729 &dt, &vectype))
2730 gcc_unreachable ();
2732 return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
2735 return VECTOR_BOOLEAN_TYPE_P (STMT_VINFO_VECTYPE (stmt_vinfo));
2739 /* For constant and loop invariant defs of SLP_NODE this function returns
2740 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2741 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2742 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2743 REDUC_INDEX is the index of the reduction operand in the statements, unless
2744 it is -1. */
2746 static void
2747 vect_get_constant_vectors (tree op, slp_tree slp_node,
2748 vec<tree> *vec_oprnds,
2749 unsigned int op_num, unsigned int number_of_vectors,
2750 int reduc_index)
2752 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2753 gimple *stmt = stmts[0];
2754 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2755 unsigned nunits;
2756 tree vec_cst;
2757 tree *elts;
2758 unsigned j, number_of_places_left_in_vector;
2759 tree vector_type;
2760 tree vop;
2761 int group_size = stmts.length ();
2762 unsigned int vec_num, i;
2763 unsigned number_of_copies = 1;
2764 vec<tree> voprnds;
2765 voprnds.create (number_of_vectors);
2766 bool constant_p, is_store;
2767 tree neutral_op = NULL;
2768 enum tree_code code = gimple_expr_code (stmt);
2769 gimple *def_stmt;
2770 struct loop *loop;
2771 gimple_seq ctor_seq = NULL;
2773 /* Check if vector type is a boolean vector. */
2774 if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
2775 && vect_mask_constant_operand_p (stmt, op_num))
2776 vector_type
2777 = build_same_sized_truth_vector_type (STMT_VINFO_VECTYPE (stmt_vinfo));
2778 else
2779 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2780 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2782 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2783 && reduc_index != -1)
2785 op_num = reduc_index;
2786 op = gimple_op (stmt, op_num + 1);
2787 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2788 we need either neutral operands or the original operands. See
2789 get_initial_def_for_reduction() for details. */
2790 switch (code)
2792 case WIDEN_SUM_EXPR:
2793 case DOT_PROD_EXPR:
2794 case SAD_EXPR:
2795 case PLUS_EXPR:
2796 case MINUS_EXPR:
2797 case BIT_IOR_EXPR:
2798 case BIT_XOR_EXPR:
2799 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2800 neutral_op = build_real (TREE_TYPE (op), dconst0);
2801 else
2802 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2804 break;
2806 case MULT_EXPR:
2807 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2808 neutral_op = build_real (TREE_TYPE (op), dconst1);
2809 else
2810 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2812 break;
2814 case BIT_AND_EXPR:
2815 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2816 break;
2818 /* For MIN/MAX we don't have an easy neutral operand but
2819 the initial values can be used fine here. Only for
2820 a reduction chain do we have to force a neutral element. */
2821 case MAX_EXPR:
2822 case MIN_EXPR:
2823 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2824 neutral_op = NULL;
2825 else
2827 def_stmt = SSA_NAME_DEF_STMT (op);
2828 loop = (gimple_bb (stmt))->loop_father;
2829 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2830 loop_preheader_edge (loop));
2832 break;
2834 default:
2835 gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
2836 neutral_op = NULL;
2840 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2842 is_store = true;
2843 op = gimple_assign_rhs1 (stmt);
2845 else
2846 is_store = false;
2848 gcc_assert (op);
2850 if (CONSTANT_CLASS_P (op))
2851 constant_p = true;
2852 else
2853 constant_p = false;
2855 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2856 created vectors. It is greater than 1 if unrolling is performed.
2858 For example, we have two scalar operands, s1 and s2 (e.g., a group of
2859 strided accesses of size two), while NUNITS is four (i.e., four scalars
2860 of this type can be packed in a vector). The output vector will contain
2861 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2862 will be 2).
2864 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2865 containing the operands.
2867 For example, NUNITS is four as before, and the group size is 8
2868 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2869 {s5, s6, s7, s8}. */
2871 number_of_copies = nunits * number_of_vectors / group_size;
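/* In the first example above nunits == 4, group_size == 2 and a single
   vector is built, so number_of_copies == 4 * 1 / 2 == 2. In the second
   example group_size == 8 and two vectors are built, so
   number_of_copies == 4 * 2 / 8 == 1. */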
2873 number_of_places_left_in_vector = nunits;
2874 elts = XALLOCAVEC (tree, nunits);
2875 bool place_after_defs = false;
2876 for (j = 0; j < number_of_copies; j++)
2878 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2880 if (is_store)
2881 op = gimple_assign_rhs1 (stmt);
2882 else
2884 switch (code)
2886 case COND_EXPR:
2888 tree cond = gimple_assign_rhs1 (stmt);
2889 if (TREE_CODE (cond) == SSA_NAME)
2890 op = gimple_op (stmt, op_num + 1);
2891 else if (op_num == 0 || op_num == 1)
2892 op = TREE_OPERAND (cond, op_num);
2893 else
2895 if (op_num == 2)
2896 op = gimple_assign_rhs2 (stmt);
2897 else
2898 op = gimple_assign_rhs3 (stmt);
2901 break;
2903 case CALL_EXPR:
2904 op = gimple_call_arg (stmt, op_num);
2905 break;
2907 case LSHIFT_EXPR:
2908 case RSHIFT_EXPR:
2909 case LROTATE_EXPR:
2910 case RROTATE_EXPR:
2911 op = gimple_op (stmt, op_num + 1);
2912 /* Unlike the other binary operators, shifts/rotates have
2913 the shift count being int, instead of the same type as
2914 the lhs, so make sure the scalar is the right type if
2915 we are dealing with vectors of
2916 long long/long/short/char. */
2917 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2918 op = fold_convert (TREE_TYPE (vector_type), op);
2919 break;
2921 default:
2922 op = gimple_op (stmt, op_num + 1);
2923 break;
2927 if (reduc_index != -1)
2929 loop = (gimple_bb (stmt))->loop_father;
2930 def_stmt = SSA_NAME_DEF_STMT (op);
2932 gcc_assert (loop);
2934 /* Get the def before the loop. In a reduction chain we have only
2935 one initial value. */
2936 if ((j != (number_of_copies - 1)
2937 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2938 && i != 0))
2939 && neutral_op)
2940 op = neutral_op;
2941 else
2942 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2943 loop_preheader_edge (loop));
2946 /* Create 'vect_ = {op0,op1,...,opn}'. */
2947 number_of_places_left_in_vector--;
2948 tree orig_op = op;
2949 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2951 if (CONSTANT_CLASS_P (op))
2953 if (VECTOR_BOOLEAN_TYPE_P (vector_type))
2955 /* Can't use VIEW_CONVERT_EXPR for booleans because
2956 of possibly different sizes of scalar value and
2957 vector element. */
2958 if (integer_zerop (op))
2959 op = build_int_cst (TREE_TYPE (vector_type), 0);
2960 else if (integer_onep (op))
2961 op = build_int_cst (TREE_TYPE (vector_type), 1);
2962 else
2963 gcc_unreachable ();
2965 else
2966 op = fold_unary (VIEW_CONVERT_EXPR,
2967 TREE_TYPE (vector_type), op);
2968 gcc_assert (op && CONSTANT_CLASS_P (op));
2970 else
2972 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2973 gimple *init_stmt;
2974 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2975 init_stmt
2976 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2977 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2978 op = new_temp;
2981 elts[number_of_places_left_in_vector] = op;
2982 if (!CONSTANT_CLASS_P (op))
2983 constant_p = false;
2984 if (TREE_CODE (orig_op) == SSA_NAME
2985 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2986 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2987 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2988 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2989 place_after_defs = true;
2991 if (number_of_places_left_in_vector == 0)
2993 number_of_places_left_in_vector = nunits;
2995 if (constant_p)
2996 vec_cst = build_vector (vector_type, elts);
2997 else
2999 vec<constructor_elt, va_gc> *v;
3000 unsigned k;
3001 vec_alloc (v, nunits);
3002 for (k = 0; k < nunits; ++k)
3003 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
3004 vec_cst = build_constructor (vector_type, v);
3006 tree init;
3007 gimple_stmt_iterator gsi;
3008 if (place_after_defs)
3010 gsi = gsi_for_stmt
3011 (vect_find_last_scalar_stmt_in_slp (slp_node));
3012 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
3014 else
3015 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
3016 if (ctor_seq != NULL)
3018 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
3019 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
3020 GSI_SAME_STMT);
3021 ctor_seq = NULL;
3023 voprnds.quick_push (init);
3024 place_after_defs = false;
3029 /* Since the vectors are created in the reverse order, we should reverse
3030 them here. */
3031 vec_num = voprnds.length ();
3032 for (j = vec_num; j != 0; j--)
3034 vop = voprnds[j - 1];
3035 vec_oprnds->quick_push (vop);
3038 voprnds.release ();
3040 /* If VF is greater than the unrolling factor needed for the SLP
3041 group of stmts, NUMBER_OF_VECTORS to be created is greater than
3042 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
3043 to replicate the vectors. */
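/* E.g., if four vectors are required but only two were built above,
   either the neutral-value vector (for reductions) or the existing
   vectors are pushed again until the requested count is reached. */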
3044 while (number_of_vectors > vec_oprnds->length ())
3046 tree neutral_vec = NULL;
3048 if (neutral_op)
3050 if (!neutral_vec)
3051 neutral_vec = build_vector_from_val (vector_type, neutral_op);
3053 vec_oprnds->quick_push (neutral_vec);
3055 else
3057 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
3058 vec_oprnds->quick_push (vop);
3064 /* Get the vectorized definitions from SLP_NODE, which already contains
3065 the corresponding vectorized def-stmts. */
3067 static void
3068 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
3070 tree vec_oprnd;
3071 gimple *vec_def_stmt;
3072 unsigned int i;
3074 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
3076 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
3078 gcc_assert (vec_def_stmt);
3079 vec_oprnd = gimple_get_lhs (vec_def_stmt);
3080 vec_oprnds->quick_push (vec_oprnd);
3085 /* Get vectorized definitions for SLP_NODE.
3086 If the scalar definitions are loop invariants or constants, collect them and
3087 call vect_get_constant_vectors() to create vector stmts.
3088 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
3089 must be stored in the corresponding child of SLP_NODE, and we call
3090 vect_get_slp_vect_defs () to retrieve them. */
3092 void
3093 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
3094 vec<vec<tree> > *vec_oprnds, int reduc_index)
3096 gimple *first_stmt;
3097 int number_of_vects = 0, i;
3098 unsigned int child_index = 0;
3099 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
3100 slp_tree child = NULL;
3101 vec<tree> vec_defs;
3102 tree oprnd;
3103 bool vectorized_defs;
3105 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
3106 FOR_EACH_VEC_ELT (ops, i, oprnd)
3108 /* For each operand we check if it has vectorized definitions in a child
3109 node or we need to create them (for invariants and constants). We
3110 check if the LHS of the first stmt of the next child matches OPRND.
3111 If it does, we found the correct child. Otherwise, we call
3112 vect_get_constant_vectors () and do not advance CHILD_INDEX in order
3113 to check this child node for the next operand. */
3114 vectorized_defs = false;
3115 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
3117 child = SLP_TREE_CHILDREN (slp_node)[child_index];
3119 /* We have to check both pattern and original def, if available. */
3120 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
3122 gimple *first_def = SLP_TREE_SCALAR_STMTS (child)[0];
3123 gimple *related
3124 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
3126 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
3127 || (related
3128 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
3130 /* The number of vector defs is determined by the number of
3131 vector statements in the node from which we get those
3132 statements. */
3133 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
3134 vectorized_defs = true;
3135 child_index++;
3138 else
3139 child_index++;
3142 if (!vectorized_defs)
3144 if (i == 0)
3146 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3147 /* Number of vector stmts was calculated according to LHS in
3148 vect_schedule_slp_instance (), fix it by replacing LHS with
3149 RHS, if necessary. See vect_get_smallest_scalar_type () for
3150 details. */
3151 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3152 &rhs_size_unit);
3153 if (rhs_size_unit != lhs_size_unit)
3155 number_of_vects *= rhs_size_unit;
3156 number_of_vects /= lhs_size_unit;
3161 /* Allocate memory for vectorized defs. */
3162 vec_defs = vNULL;
3163 vec_defs.create (number_of_vects);
3165 /* For reduction defs we call vect_get_constant_vectors (), since we are
3166 looking for initial loop invariant values. */
3167 if (vectorized_defs && reduc_index == -1)
3168 /* The defs are already vectorized. */
3169 vect_get_slp_vect_defs (child, &vec_defs);
3170 else
3171 /* Build vectors from scalar defs. */
3172 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3173 number_of_vects, reduc_index);
3175 vec_oprnds->quick_push (vec_defs);
3177 /* For reductions, we only need initial values. */
3178 if (reduc_index != -1)
3179 return;
3184 /* Create NCOPIES permutation statements using the vector mask MASK and
3185 two input vectors placed in DR_CHAIN at FIRST_VEC_INDX and
3186 SECOND_VEC_INDX for the first copy, shifting by STRIDE elements of
3187 DR_CHAIN for every subsequent copy.
3188 (STRIDE is the length of DR_CHAIN divided by the number of
3189 copies.)
3190 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE where
3191 the created stmts must be inserted. */
3193 static inline void
3194 vect_create_mask_and_perm (gimple *stmt,
3195 tree mask, int first_vec_indx, int second_vec_indx,
3196 gimple_stmt_iterator *gsi, slp_tree node,
3197 tree vectype, vec<tree> dr_chain,
3198 int ncopies, int vect_stmts_counter)
3200 tree perm_dest;
3201 gimple *perm_stmt = NULL;
3202 int i, stride_in, stride_out;
3203 tree first_vec, second_vec, data_ref;
3205 stride_out = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
3206 stride_in = dr_chain.length () / ncopies;
3208 /* Initialize the vect stmts of NODE to properly insert the generated
3209 stmts later. */
3210 for (i = SLP_TREE_VEC_STMTS (node).length ();
3211 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3212 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3214 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3215 for (i = 0; i < ncopies; i++)
3217 first_vec = dr_chain[first_vec_indx];
3218 second_vec = dr_chain[second_vec_indx];
3220 /* Generate the permute statement if necessary. */
3221 if (mask)
3223 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3224 first_vec, second_vec, mask);
3225 data_ref = make_ssa_name (perm_dest, perm_stmt);
3226 gimple_set_lhs (perm_stmt, data_ref);
3227 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3229 else
3230 /* If mask was NULL_TREE generate the requested identity transform. */
3231 perm_stmt = SSA_NAME_DEF_STMT (first_vec);
3233 /* Store the vector statement in NODE. */
3234 SLP_TREE_VEC_STMTS (node)[stride_out * i + vect_stmts_counter]
3235 = perm_stmt;
3237 first_vec_indx += stride_in;
3238 second_vec_indx += stride_in;
3243 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3244 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3245 permute statements for the SLP node NODE of the SLP instance
3246 SLP_NODE_INSTANCE. */
3248 bool
3249 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3250 gimple_stmt_iterator *gsi, int vf,
3251 slp_instance slp_node_instance, bool analyze_only)
3253 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3254 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3255 tree mask_element_type = NULL_TREE, mask_type;
3256 int nunits, vec_index = 0;
3257 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3258 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3259 int unroll_factor, mask_element, ncopies;
3260 unsigned char *mask;
3261 machine_mode mode;
3263 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3264 return false;
3266 stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
3268 mode = TYPE_MODE (vectype);
3270 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3271 same size as the vector element being permuted. */
3272 mask_element_type = lang_hooks.types.type_for_mode
3273 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3274 mask_type = get_vectype_for_scalar_type (mask_element_type);
3275 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3276 mask = XALLOCAVEC (unsigned char, nunits);
3277 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3279 /* The number of copies is determined by the final vectorization factor
3280 relative to the SLP_NODE_INSTANCE unrolling factor. */
3281 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
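/* E.g., a final vectorization factor of 8 with an instance unrolling
   factor of 2 requires each permutation to be emitted ncopies == 4
   times, each copy operating STRIDE vectors further into DR_CHAIN. */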
3283 /* Generate permutation masks for every NODE. Number of masks for each NODE
3284 is equal to GROUP_SIZE.
3285 E.g., we have a group of three nodes with three loads from the same
3286 location in each node, and the vector size is 4. I.e., we have an
3287 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3288 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3289 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3292 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3293 The last mask is illegal since we assume two operands for the permute
3294 operation, and the mask element values can't be outside that range.
3295 Hence, the last mask must be converted into {2,5,5,5}.
3296 For the first two permutations we need the first and the second input
3297 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3298 we need the second and the third vectors: {b1,c1,a2,b2} and
3299 {c2,a3,b3,c3}. */
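/* To see how the masks above arise: the a's sit at indices 0, 3, 6, 9,
   ... of the flat load sequence. With nunits == 4, i / nunits selects
   the input vector and i % nunits the element, giving {0,0,0,3} for
   a0a0a0a1, {3,3,6,6} for a1a1a2a2 and {6,9,9,9} for a2a3a3a3, the
   last of which is rewritten as {2,5,5,5} relative to its own two
   input vectors as described above. */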
3301 int vect_stmts_counter = 0;
3302 int index = 0;
3303 int first_vec_index = -1;
3304 int second_vec_index = -1;
3305 bool noop_p = true;
3307 for (int j = 0; j < unroll_factor; j++)
3309 for (int k = 0; k < group_size; k++)
3311 int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
3312 + j * STMT_VINFO_GROUP_SIZE (stmt_info));
3313 vec_index = i / nunits;
3314 mask_element = i % nunits;
3315 if (vec_index == first_vec_index
3316 || first_vec_index == -1)
3318 first_vec_index = vec_index;
3320 else if (vec_index == second_vec_index
3321 || second_vec_index == -1)
3323 second_vec_index = vec_index;
3324 mask_element += nunits;
3326 else
3328 if (dump_enabled_p ())
3330 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3331 "permutation requires at "
3332 "least three vectors ");
3333 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3334 stmt, 0);
3335 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3337 return false;
3340 gcc_assert (mask_element >= 0
3341 && mask_element < 2 * nunits);
3342 if (mask_element != index)
3343 noop_p = false;
3344 mask[index++] = mask_element;
3346 if (index == nunits)
3348 if (! noop_p
3349 && ! can_vec_perm_p (mode, false, mask))
3351 if (dump_enabled_p ())
3353 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3354 vect_location,
3355 "unsupported vect permute { ");
3356 for (i = 0; i < nunits; ++i)
3357 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ", mask[i]);
3358 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3360 return false;
3363 if (!analyze_only)
3365 tree mask_vec = NULL_TREE;
3367 if (! noop_p)
3369 tree *mask_elts = XALLOCAVEC (tree, nunits);
3370 for (int l = 0; l < nunits; ++l)
3371 mask_elts[l] = build_int_cst (mask_element_type,
3372 mask[l]);
3373 mask_vec = build_vector (mask_type, mask_elts);
3376 if (second_vec_index == -1)
3377 second_vec_index = first_vec_index;
3378 vect_create_mask_and_perm (stmt, mask_vec, first_vec_index,
3379 second_vec_index,
3380 gsi, node, vectype, dr_chain,
3381 ncopies, vect_stmts_counter++);
3384 index = 0;
3385 first_vec_index = -1;
3386 second_vec_index = -1;
3387 noop_p = true;
3392 return true;
3397 /* Vectorize SLP instance tree in postorder. */
3399 static bool
3400 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3401 unsigned int vectorization_factor)
3403 gimple *stmt;
3404 bool grouped_store, is_store;
3405 gimple_stmt_iterator si;
3406 stmt_vec_info stmt_info;
3407 unsigned int vec_stmts_size, nunits, group_size;
3408 tree vectype;
3409 int i, j;
3410 slp_tree child;
3412 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
3413 return false;
3415 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3416 vect_schedule_slp_instance (child, instance, vectorization_factor);
3418 /* Push SLP node def-type to stmts. */
3419 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3420 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
3421 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
3422 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);
3424 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3425 stmt_info = vinfo_for_stmt (stmt);
3427 /* VECTYPE is the type of the destination. */
3428 vectype = STMT_VINFO_VECTYPE (stmt_info);
3429 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3430 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3432 /* For each SLP instance calculate the number of vector stmts to be created
3433 for the scalar stmts in each node of the SLP tree. The number of vector
3434 elements in one vector iteration is the number of scalar elements in
3435 one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
3436 size.
3437 Unless this is an SLP reduction, in which case the number of vector
3438 stmts is equal to the number of vector stmts of the children. */
3439 if (GROUP_FIRST_ELEMENT (stmt_info)
3440 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
3441 vec_stmts_size = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
3442 else
3443 vec_stmts_size = (vectorization_factor * group_size) / nunits;
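/* E.g., with a vectorization factor of 4, a group size of 2 and
   nunits == 4 this gives 4 * 2 / 4 == 2 vector stmts per node. */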
3445 if (!SLP_TREE_VEC_STMTS (node).exists ())
3447 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3448 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3451 if (dump_enabled_p ())
3453 dump_printf_loc (MSG_NOTE, vect_location,
3454 "------>vectorizing SLP node starting from: ");
3455 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3456 dump_printf (MSG_NOTE, "\n");
3459 /* Vectorized stmts go before the last scalar stmt which is where
3460 all uses are ready. */
3461 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3463 /* Mark the first element of the reduction chain as reduction to properly
3464 transform the node. In the analysis phase only the last element of the
3465 chain is marked as reduction. */
3466 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3467 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3469 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3470 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3473 /* Handle two-operation SLP nodes by vectorizing the group with
3474 both operations and then performing a merge. */
3475 if (SLP_TREE_TWO_OPERATORS (node))
3477 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3478 enum tree_code ocode;
3479 gimple *ostmt;
3480 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3481 bool allsame = true;
3482 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3483 if (gimple_assign_rhs_code (ostmt) != code0)
3485 mask[i] = 1;
3486 allsame = false;
3487 ocode = gimple_assign_rhs_code (ostmt);
3489 else
3490 mask[i] = 0;
3491 if (!allsame)
3493 vec<gimple *> v0;
3494 vec<gimple *> v1;
3495 unsigned j;
3496 tree tmask = NULL_TREE;
3497 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3498 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3499 SLP_TREE_VEC_STMTS (node).truncate (0);
3500 gimple_assign_set_rhs_code (stmt, ocode);
3501 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3502 gimple_assign_set_rhs_code (stmt, code0);
3503 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3504 SLP_TREE_VEC_STMTS (node).truncate (0);
3505 tree meltype = build_nonstandard_integer_type
3506 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3507 tree mvectype = get_same_sized_vectype (meltype, vectype);
3508 unsigned k = 0, l;
3509 for (j = 0; j < v0.length (); ++j)
3511 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3512 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3514 if (k >= group_size)
3515 k = 0;
3516 melts[l] = build_int_cst
3517 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3519 tmask = build_vector (mvectype, melts);
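/* E.g., for an alternating plus/minus group and a 4-element vector
   type this builds the selector {0, 5, 2, 7}: even elements are taken
   from the first (code0) vector, odd elements from the second (ocode)
   vector. */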
3521 /* ??? Not all targets support a VEC_PERM_EXPR with a
3522 constant mask that would translate to a vec_merge RTX
3523 (with their vec_perm_const_ok). We can either not
3524 vectorize in that case or let veclower do its job.
3525 Unfortunately that isn't too great and at least for
3526 plus/minus we'd eventually like to match targets'
3527 vector addsub instructions. */
3528 gimple *vstmt;
3529 vstmt = gimple_build_assign (make_ssa_name (vectype),
3530 VEC_PERM_EXPR,
3531 gimple_assign_lhs (v0[j]),
3532 gimple_assign_lhs (v1[j]), tmask);
3533 vect_finish_stmt_generation (stmt, vstmt, &si);
3534 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3536 v0.release ();
3537 v1.release ();
3538 return false;
3541 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3543 /* Restore stmt def-types. */
3544 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3545 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
3546 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
3547 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;
3549 return is_store;
3552 /* Replace scalar calls from SLP node NODE with assignments of zero to
3553 their lhs. For loop vectorization this is done in vectorizable_call,
3554 but for SLP it needs to be deferred until the end of vect_schedule_slp,
3555 because multiple SLP instances may refer to the same scalar stmt. */
3557 static void
3558 vect_remove_slp_scalar_calls (slp_tree node)
3560 gimple *stmt, *new_stmt;
3561 gimple_stmt_iterator gsi;
3562 int i;
3563 slp_tree child;
3564 tree lhs;
3565 stmt_vec_info stmt_info;
3567 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
3568 return;
3570 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3571 vect_remove_slp_scalar_calls (child);
3573 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3575 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3576 continue;
3577 stmt_info = vinfo_for_stmt (stmt);
3578 if (stmt_info == NULL
3579 || is_pattern_stmt_p (stmt_info)
3580 || !PURE_SLP_STMT (stmt_info))
3581 continue;
3582 lhs = gimple_call_lhs (stmt);
3583 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3584 set_vinfo_for_stmt (new_stmt, stmt_info);
3585 set_vinfo_for_stmt (stmt, NULL);
3586 STMT_VINFO_STMT (stmt_info) = new_stmt;
3587 gsi = gsi_for_stmt (stmt);
3588 gsi_replace (&gsi, new_stmt, false);
3589 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3593 /* Generate vector code for all SLP instances in the loop/basic block. */
3595 bool
3596 vect_schedule_slp (vec_info *vinfo)
3598 vec<slp_instance> slp_instances;
3599 slp_instance instance;
3600 unsigned int i, vf;
3601 bool is_store = false;
3603 slp_instances = vinfo->slp_instances;
3604 if (is_a <loop_vec_info> (vinfo))
3605 vf = as_a <loop_vec_info> (vinfo)->vectorization_factor;
3606 else
3607 vf = 1;
3609 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3611 /* Schedule the tree of INSTANCE. */
3612 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3613 instance, vf);
3614 if (dump_enabled_p ())
3615 dump_printf_loc (MSG_NOTE, vect_location,
3616 "vectorizing stmts using SLP.\n");
3619 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3621 slp_tree root = SLP_INSTANCE_TREE (instance);
3622 gimple *store;
3623 unsigned int j;
3624 gimple_stmt_iterator gsi;
3626 /* Remove scalar call stmts. Do not do this for basic-block
3627 vectorization as not all uses may be vectorized.
3628 ??? Why should this be necessary? DCE should be able to
3629 remove the stmts itself.
3630 ??? For BB vectorization we can as well remove scalar
3631 stmts starting from the SLP tree root if they have no
3632 uses. */
3633 if (is_a <loop_vec_info> (vinfo))
3634 vect_remove_slp_scalar_calls (root);
3636 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3637 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3639 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3640 break;
3642 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3643 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3644 /* Free the attached stmt_vec_info and remove the stmt. */
3645 gsi = gsi_for_stmt (store);
3646 unlink_stmt_vdef (store);
3647 gsi_remove (&gsi, true);
3648 release_defs (store);
3649 free_stmt_vec_info (store);
3653 return is_store;