/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "params.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"
#include "dbgcnt.h"
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple *> scalar_stmts)
{
  slp_tree node;
  gimple *stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;
  SLP_TREE_DEF_TYPE (node) = vect_internal_def;

  return node;
}
/* This structure is used in creation of an SLP tree.  Each instance
   corresponds to the same operand in a group of scalar stmts in an SLP
   node.  */
typedef struct _slp_oprnd_info
{
  /* Def-stmts for the operands.  */
  vec<gimple *> def_stmts;
  /* Information about the first statement, its vector def-type, type, the
     operand itself in case it's constant, and an indication if it's a pattern
     stmt.  */
  enum vect_def_type first_dt;
  tree first_op_type;
  bool first_pattern;
  bool second_pattern;
} *slp_oprnd_info;
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)
{
  gimple *next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
	return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
	result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
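
/* An illustrative (hypothetical) example for the routine above: for an
   interleaving chain of loads

     s1: ... = a[4*i];      (GROUP_FIRST_ELEMENT)
     s2: ... = a[4*i + 2];  (GROUP_GAP 2)
     s3: ... = a[4*i + 3];  (GROUP_GAP 1)

   the place of s3 in the chain starting at s1 is 0 + 2 + 1 = 3, i.e. the
   GROUP_GAPs of the elements walked over accumulate into the position.  */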
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */

static int
vect_get_and_check_slp_defs (vec_info *vinfo,
			     gimple *stmt, unsigned stmt_num,
			     vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  gimple *def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR
	  && COMPARISON_CLASS_P (gimple_assign_rhs1 (stmt)))
	{
	  first_op_cond = true;
	  commutative = true;
	  number_of_oprnds++;
	}
      else
	commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
	{
	  if (i == 0 || i == 1)
	    oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
				  swapped ? !i : i);
	  else
	    oprnd = gimple_op (stmt, first_op_idx + i - 1);
	}
      else
	oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: can't analyze def for ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }

	  return -1;
	}

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
	 from the pattern.  Check that all the stmts of the node are in the
	 pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
	  && vect_stmt_in_region_p (vinfo, def_stmt)
	  && vinfo_for_stmt (def_stmt)
	  && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
	{
	  pattern = true;
	  if (!first && !oprnd_info->first_pattern
	      /* Allow different pattern state for the defs of the
		 first stmt in reduction chains.  */
	      && (oprnd_info->first_dt != vect_reduction_def
		  || (!second && !oprnd_info->second_pattern)))
	    {
	      if (i == 0
		  && !swapped
		  && commutative)
		{
		  swapped = true;
		  goto again;
		}

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: some of the stmts"
				   " are in a pattern, and others are not ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}

	      return 1;
	    }

	  def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
	  dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

	  if (dt == vect_unknown_def_type)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Unsupported pattern.\n");
	      return -1;
	    }

	  switch (gimple_code (def_stmt))
	    {
	    case GIMPLE_PHI:
	    case GIMPLE_ASSIGN:
	      break;

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported defining stmt:\n");
	      return -1;
	    }
	}

      if (second)
	oprnd_info->second_pattern = pattern;

      if (first)
	{
	  oprnd_info->first_dt = dt;
	  oprnd_info->first_pattern = pattern;
	  oprnd_info->first_op_type = TREE_TYPE (oprnd);
	}
      else
	{
	  /* Not first stmt of the group, check that the def-stmt/s match
	     the def-stmt/s of the first stmt.  Allow different definition
	     types for reduction chains: the first stmt must be a
	     vect_reduction_def (a phi node), and the rest
	     vect_internal_def.  */
	  if (((oprnd_info->first_dt != dt
		&& !(oprnd_info->first_dt == vect_reduction_def
		     && dt == vect_internal_def)
		&& !((oprnd_info->first_dt == vect_external_def
		      || oprnd_info->first_dt == vect_constant_def)
		     && (dt == vect_external_def
			 || dt == vect_constant_def)))
	       || !types_compatible_p (oprnd_info->first_op_type,
				       TREE_TYPE (oprnd))))
	    {
	      /* Try swapping operands if we got a mismatch.  */
	      if (i == 0
		  && !swapped
		  && commutative)
		{
		  swapped = true;
		  goto again;
		}

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Build SLP failed: different types\n");

	      return 1;
	    }
	}

      /* Check the types of the definitions.  */
      switch (dt)
	{
	case vect_constant_def:
	case vect_external_def:
	case vect_reduction_def:
	  break;

	case vect_internal_def:
	  oprnd_info->def_stmts.quick_push (def_stmt);
	  break;

	default:
	  /* FORNOW: Not supported.  */
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: illegal type of def ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }

	  return -1;
	}
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
	{
	  tree cond = gimple_assign_rhs1 (stmt);
	  swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
			     &TREE_OPERAND (cond, 1));
	  TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
	}
      else
	swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
			   gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
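
/* A (hypothetical) example of the swapping logic above: for the group

     x0 = a0 + b0;
     x1 = b1 + a1;

   the defs of x1's operands do not match those collected from x0, but
   PLUS_EXPR is commutative, so the routine retries with swapped operands
   and, once a swap fixed the mismatch, canonicalizes the GIMPLE IL via
   swap_ssa_operands.  */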
/* Verify that the scalar stmts STMTS are isomorphic, do not require any
   data permutation, and are of a supported type of operation.  Return
   true if so; otherwise return false and indicate in *MATCHES which
   stmts are not isomorphic to the first one.  If MATCHES[0] is false
   then this indicates the comparison could not be carried out or the
   stmts will never be vectorized by SLP.  */
static bool
vect_build_slp_tree_1 (vec_info *vinfo,
		       vec<gimple *> stmts, unsigned int group_size,
		       unsigned nops, unsigned int *max_nunits,
		       bool *matches, bool *two_operators)
{
  unsigned int i;
  gimple *first_stmt = stmts[0], *stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype = NULL_TREE, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  HOST_WIDE_INT dummy;
  gimple *first_load = NULL, *prev_first_load = NULL;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unvectorizable statement ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: not GIMPLE_ASSIGN nor "
			       "GIMPLE_CALL ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      /* If populating the vector type requires unrolling then fail
	 before adjusting *max_nunits for basic-block vectorization.  */
      if (is_a <bb_vec_info> (vinfo)
	  && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Build SLP failed: unrolling required "
			   "in basic block SLP\n");
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
	*max_nunits = TYPE_VECTOR_SUBPARTS (vectype);

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
	{
	  rhs_code = CALL_EXPR;
	  if (gimple_call_internal_p (call_stmt)
	      || gimple_call_tail_p (call_stmt)
	      || gimple_call_noreturn_p (call_stmt)
	      || !gimple_call_nothrow_p (call_stmt)
	      || gimple_call_chain (call_stmt))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported call type ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				    call_stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }
	}
      else
	rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
	{
	  first_stmt_code = rhs_code;

	  /* Shift arguments should be equal in all the packed stmts for a
	     vector shift with scalar shift operand.  */
	  if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
	      || rhs_code == LROTATE_EXPR
	      || rhs_code == RROTATE_EXPR)
	    {
	      vec_mode = TYPE_MODE (vectype);

	      /* First see if we have a vector/vector shift.  */
	      optab = optab_for_tree_code (rhs_code, vectype,
					   optab_vector);

	      if (!optab
		  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
		{
		  /* No vector/vector shift, try for a vector/scalar shift.  */
		  optab = optab_for_tree_code (rhs_code, vectype,
					       optab_scalar);

		  if (!optab)
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "Build SLP failed: no optab.\n");
		      /* Fatal mismatch.  */
		      matches[0] = false;
		      return false;
		    }
		  icode = (int) optab_handler (optab, vec_mode);
		  if (icode == CODE_FOR_nothing)
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "Build SLP failed: "
					 "op not supported by target.\n");
		      /* Fatal mismatch.  */
		      matches[0] = false;
		      return false;
		    }
		  optab_op2_mode = insn_data[icode].operand[2].mode;
		  if (!VECTOR_MODE_P (optab_op2_mode))
		    {
		      need_same_oprnds = true;
		      first_op1 = gimple_assign_rhs2 (stmt);
		    }
		}
	    }
	  else if (rhs_code == WIDEN_LSHIFT_EXPR)
	    {
	      need_same_oprnds = true;
	      first_op1 = gimple_assign_rhs2 (stmt);
	    }
	}
      else
	{
	  if (first_stmt_code != rhs_code
	      && alt_stmt_code == ERROR_MARK)
	    alt_stmt_code = rhs_code;
	  if (first_stmt_code != rhs_code
	      && (first_stmt_code != IMAGPART_EXPR
		  || rhs_code != REALPART_EXPR)
	      && (first_stmt_code != REALPART_EXPR
		  || rhs_code != IMAGPART_EXPR)
	      /* Handle mismatches in plus/minus by computing both
		 and merging the results.  */
	      && !((first_stmt_code == PLUS_EXPR
		    || first_stmt_code == MINUS_EXPR)
		   && (alt_stmt_code == PLUS_EXPR
		       || alt_stmt_code == MINUS_EXPR)
		   && rhs_code == alt_stmt_code)
	      && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
		   && (first_stmt_code == ARRAY_REF
		       || first_stmt_code == BIT_FIELD_REF
		       || first_stmt_code == INDIRECT_REF
		       || first_stmt_code == COMPONENT_REF
		       || first_stmt_code == MEM_REF)))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different operation "
				   "in stmt ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "original stmt ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				    first_stmt, 0);
		}
	      /* Mismatch.  */
	      continue;
	    }

	  if (need_same_oprnds
	      && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different shift "
				   "arguments in ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Mismatch.  */
	      continue;
	    }

	  if (rhs_code == CALL_EXPR)
	    {
	      gimple *first_stmt = stmts[0];
	      if (gimple_call_num_args (stmt) != nops
		  || !operand_equal_p (gimple_call_fn (first_stmt),
				       gimple_call_fn (stmt), 0)
		  || gimple_call_fntype (first_stmt)
		     != gimple_call_fntype (stmt))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different calls in ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Mismatch.  */
		  continue;
		}
	    }
	}

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
	{
	  if (REFERENCE_CLASS_P (lhs))
	    {
	      /* Store.  */
	      ;
	    }
	  else
	    {
	      /* Load.  */
	      first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
	      if (prev_first_load)
		{
		  /* Check that there are no loads from different interleaving
		     chains in the same node.  */
		  if (prev_first_load != first_load)
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					   vect_location,
					   "Build SLP failed: different "
					   "interleaving chains in one node ");
			  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					    stmt, 0);
			  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
			}
		      /* Mismatch.  */
		      continue;
		    }
		}
	      else
		prev_first_load = first_load;
	    }
	} /* Grouped access.  */
      else
	{
	  if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
	    {
	      /* Not grouped load.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: not grouped load ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}

	      /* FORNOW: Not grouped loads are not supported.  */
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }

	  /* Not memory operation.  */
	  if (TREE_CODE_CLASS (rhs_code) != tcc_binary
	      && TREE_CODE_CLASS (rhs_code) != tcc_unary
	      && TREE_CODE_CLASS (rhs_code) != tcc_expression
	      && TREE_CODE_CLASS (rhs_code) != tcc_comparison
	      && rhs_code != CALL_EXPR)
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: operation");
		  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }

	  if (rhs_code == COND_EXPR)
	    {
	      tree cond_expr = gimple_assign_rhs1 (stmt);

	      if (i == 0)
		first_cond_code = TREE_CODE (cond_expr);
	      else if (first_cond_code != TREE_CODE (cond_expr))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different"
				       " operation");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Mismatch.  */
		  continue;
		}
	    }
	}

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
	= XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
	{
	  sel[i] = i;
	  if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
	    sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
	}
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
	{
	  for (i = 0; i < group_size; ++i)
	    if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
	      {
		matches[i] = false;
		if (dump_enabled_p ())
		  {
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Build SLP failed: different operation "
				     "in stmt ");
		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				      stmts[i], 0);
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "original stmt ");
		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				      first_stmt, 0);
		  }
	      }
	  return false;
	}
      *two_operators = true;
    }

  return true;
}
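
/* A (hypothetical) group exercising the two-operator support above:

     a[i]   = b[i]   + c[i];
     a[i+1] = b[i+1] - c[i+1];

   Both a PLUS_EXPR and a MINUS_EXPR vector are computed and then blended
   with a VEC_PERM_EXPR whose selector picks lanes of the first vector
   where the stmt uses first_stmt_code and lanes of the second vector
   where it uses alt_stmt_code; hence the can_vec_perm_p check above.  */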
/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise,
   return TRUE, recording in MATCHES which stmts (if any) failed to match
   the first one.  */
static bool
vect_build_slp_tree (vec_info *vinfo,
		     slp_tree *node, unsigned int group_size,
		     unsigned int *max_nunits,
		     vec<slp_tree> *loads,
		     bool *matches, unsigned *npermutes, unsigned *tree_size,
		     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple *stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (vinfo,
			      SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
			      max_nunits, matches, &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (vinfo, stmt, i, &oprnds_info))
	{
	case 0:
	  break;
	case -1:
	  matches[0] = false;
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	case 1:
	  matches[i] = false;
	  break;
	}
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
	vect_free_oprnd_info (oprnds_info);
	return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
	continue;

      if (++this_tree_size > max_tree_size)
	{
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	}

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
	{
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	}

      if (vect_build_slp_tree (vinfo, &child,
			       group_size, max_nunits, loads, matches,
			       npermutes, &this_tree_size, max_tree_size))
	{
	  /* If we have all children of child built up from scalars then just
	     throw that away and build it up this node from scalars.  */
	  if (!SLP_TREE_CHILDREN (child).is_empty ())
	    {
	      unsigned int j;
	      slp_tree grandchild;

	      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)
		  break;
	      if (!grandchild)
		{
		  /* Roll back.  */
		  *max_nunits = old_max_nunits;
		  loads->truncate (old_nloads);
		  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		    vect_free_slp_tree (grandchild);
		  SLP_TREE_CHILDREN (child).truncate (0);

		  dump_printf_loc (MSG_NOTE, vect_location,
				   "Building parent vector operands from "
				   "scalars instead\n");
		  oprnd_info->def_stmts = vNULL;
		  SLP_TREE_DEF_TYPE (child) = vect_external_def;
		  SLP_TREE_CHILDREN (*node).quick_push (child);
		  continue;
		}
	    }

	  oprnd_info->def_stmts = vNULL;
	  SLP_TREE_CHILDREN (*node).quick_push (child);
	  continue;
	}

      /* If the SLP build failed fatally and we analyze a basic-block
	 simply treat nodes we fail to build as externally defined
	 (and thus build vectors from the scalar defs).
	 The cost model will reject outright expensive cases.
	 ??? This doesn't treat cases where permutation ultimately
	 fails (or we don't try permutation below).  Ideally we'd
	 even compute a permutation that will end up with the maximum
	 SLP tree size...  */
      if (is_a <bb_vec_info> (vinfo)
	  && !matches[0]
	  /* ??? Rejecting patterns this way doesn't work.  We'd have to
	     do extra work to cancel the pattern so the uses see the
	     scalar version.  */
	  && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
	{
	  unsigned int j;
	  slp_tree grandchild;

	  /* Roll back.  */
	  *max_nunits = old_max_nunits;
	  loads->truncate (old_nloads);
	  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
	    vect_free_slp_tree (grandchild);
	  SLP_TREE_CHILDREN (child).truncate (0);

	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Building vector operands from scalars\n");
	  oprnd_info->def_stmts = vNULL;
	  SLP_TREE_DEF_TYPE (child) = vect_external_def;
	  SLP_TREE_CHILDREN (*node).quick_push (child);
	  continue;
	}

      /* If the SLP build for operand zero failed and operand zero
	 and one can be commutated try that for the scalar stmts
	 that failed the match.  */
      if (i == 0
	  /* A first scalar stmt mismatch signals a fatal mismatch.  */
	  && matches[0]
	  /* ??? For COND_EXPRs we can swap the comparison operands
	     as well as the arms under some constraints.  */
	  && nops == 2
	  && oprnds_info[1]->first_dt == vect_internal_def
	  && is_gimple_assign (stmt)
	  && commutative_tree_code (gimple_assign_rhs_code (stmt))
	  && !SLP_TREE_TWO_OPERATORS (*node)
	  /* Do so only if the number of not successful permutes was not
	     more than a cut-off as re-trying the recursive match on
	     possibly each level of the tree would expose exponential
	     behavior.  */
	  && *npermutes < 4)
	{
	  unsigned int j;
	  slp_tree grandchild;

	  /* Roll back.  */
	  *max_nunits = old_max_nunits;
	  loads->truncate (old_nloads);
	  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
	    vect_free_slp_tree (grandchild);
	  SLP_TREE_CHILDREN (child).truncate (0);

	  /* Swap mismatched definition stmts.  */
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Re-trying with swapped operands of stmts ");
	  for (j = 0; j < group_size; ++j)
	    if (!matches[j])
	      {
		std::swap (oprnds_info[0]->def_stmts[j],
			   oprnds_info[1]->def_stmts[j]);
		dump_printf (MSG_NOTE, "%d ", j);
	      }
	  dump_printf (MSG_NOTE, "\n");
	  /* And try again with scratch 'matches' ...  */
	  bool *tem = XALLOCAVEC (bool, group_size);
	  if (vect_build_slp_tree (vinfo, &child,
				   group_size, max_nunits, loads,
				   tem, npermutes, &this_tree_size,
				   max_tree_size))
	    {
	      /* ... so if successful we can apply the operand swapping
		 to the GIMPLE IL.  This is necessary because for example
		 vect_get_slp_defs uses operand indexes and thus expects
		 canonical operand order.  This is also necessary even
		 if we end up building the operand from scalars as
		 we'll continue to process swapped operand two.  */
	      for (j = 0; j < group_size; ++j)
		{
		  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
		  gimple_set_plf (stmt, GF_PLF_1, false);
		}
	      for (j = 0; j < group_size; ++j)
		{
		  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
		  if (!matches[j])
		    {
		      /* Avoid swapping operands twice.  */
		      if (gimple_plf (stmt, GF_PLF_1))
			continue;
		      swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
					 gimple_assign_rhs2_ptr (stmt));
		      gimple_set_plf (stmt, GF_PLF_1, true);
		    }
		}
	      /* Verify we swap all duplicates or none.  */
	      if (flag_checking)
		for (j = 0; j < group_size; ++j)
		  {
		    gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
		    gcc_assert (gimple_plf (stmt, GF_PLF_1) == !matches[j]);
		  }

	      /* If we have all children of child built up from scalars then
		 just throw that away and build it up this node from
		 scalars.  */
	      if (!SLP_TREE_CHILDREN (child).is_empty ())
		{
		  unsigned int j;
		  slp_tree grandchild;

		  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		    if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)
		      break;
		  if (!grandchild)
		    {
		      /* Roll back.  */
		      *max_nunits = old_max_nunits;
		      loads->truncate (old_nloads);
		      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child),
					j, grandchild)
			vect_free_slp_tree (grandchild);
		      SLP_TREE_CHILDREN (child).truncate (0);

		      dump_printf_loc (MSG_NOTE, vect_location,
				       "Building parent vector operands from "
				       "scalars instead\n");
		      oprnd_info->def_stmts = vNULL;
		      SLP_TREE_DEF_TYPE (child) = vect_external_def;
		      SLP_TREE_CHILDREN (*node).quick_push (child);
		      continue;
		    }
		}

	      oprnd_info->def_stmts = vNULL;
	      SLP_TREE_CHILDREN (*node).quick_push (child);
	      continue;
	    }

	  ++*npermutes;
	}

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, location_t loc, slp_tree node)
{
  int i;
  gimple *stmt;
  slp_tree child;

  dump_printf_loc (dump_kind, loc, "node%s\n",
		   SLP_TREE_DEF_TYPE (node) != vect_internal_def
		   ? " (external)" : "");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf_loc (dump_kind, loc, "\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, loc, child);
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple *stmt;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple *stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
		  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
			  vec<unsigned> permutation)
{
  gimple *stmt;
  vec<gimple *> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
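
/* For example, with group_size 4 and permutation {2, 0, 3, 1} the stmt at
   index 0 ends up at index 2, the stmt at index 1 at index 0, and so on:
   tmp_stmts[permutation[i]] = stmt places every stmt at its new slot.  */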
/* Attempt to reorder stmts in a reduction chain so that we don't
   require any load permutation.  Return true if that was possible,
   otherwise return false.  */

static bool
vect_attempt_slp_rearrange_stmts (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j;
  sbitmap load_index;
  unsigned int lidx;
  slp_tree node, load;

  /* Compare all the permutation sequences to the first one.  We know
     that at least one load is permuted.  */
  node = SLP_INSTANCE_LOADS (slp_instn)[0];
  if (!node->load_permutation.exists ())
    return false;
  for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
    {
      if (!load->load_permutation.exists ())
	return false;
      FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
	if (lidx != node->load_permutation[j])
	  return false;
    }

  /* Check that the loads in the first sequence are different and there
     are no gaps between them.  */
  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
    {
      if (lidx >= group_size)
	{
	  sbitmap_free (load_index);
	  return false;
	}
      if (bitmap_bit_p (load_index, lidx))
	{
	  sbitmap_free (load_index);
	  return false;
	}
      bitmap_set_bit (load_index, lidx);
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
	sbitmap_free (load_index);
	return false;
      }
  sbitmap_free (load_index);

  /* This permutation is valid for reduction.  Since the order of the
     statements in the nodes is not important unless they are memory
     accesses, we can rearrange the statements in all the nodes
     according to the order of the loads.  */
  vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
			    node->load_permutation);

  /* We are done, no actual permutations need to be generated.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    SLP_TREE_LOAD_PERMUTATION (node).release ();
  return true;
}
/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  slp_tree node;
  gimple *stmt, *load, *next_load;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	if (node->load_permutation.exists ())
	  FOR_EACH_VEC_ELT (node->load_permutation, j, next)
	    dump_printf (MSG_NOTE, "%d ", next);
	else
	  for (k = 0; k < group_size; ++k)
	    dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ??? Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is not important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (vect_attempt_slp_rearrange_stmts (slp_instn))
	return true;

      /* Fallthru to general load permutation handling.  */
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
	 no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	{
	  if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
	    continue;
	  bool subchain_p = true;
	  next_load = NULL;
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
	    {
	      if (j != 0
		  && (next_load != load
		      || GROUP_GAP (vinfo_for_stmt (load)) != 1))
		{
		  subchain_p = false;
		  break;
		}
	      next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
	    }
	  if (subchain_p)
	    SLP_TREE_LOAD_PERMUTATION (node).release ();
	  else
	    {
	      /* Verify the permutation can be generated.  */
	      vec<tree> tem;
	      if (!vect_transform_slp_perm_load (node, tem, NULL,
						 1, slp_instn, true))
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
				   vect_location,
				   "unsupported load permutation\n");
		  return false;
		}
	    }
	}
      return true;
    }

  /* For loop vectorization verify we can generate the permutation.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
	&& !vect_transform_slp_perm_load
	      (node, vNULL, NULL,
	       SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;

  return true;
}
/* Find the last scalar stmt in the SLP group rooted at NODE, taking
   related pattern stmts into account.  */

gimple *
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple *last = NULL, *stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
	last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
	last = get_later_stmt (stmt, last);
    }

  return last;
}
/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
			 stmt_vector_for_cost *prologue_cost_vec,
			 stmt_vector_for_cost *body_cost_vec,
			 unsigned ncopies_for_cost)
{
  unsigned i, j;
  slp_tree child;
  gimple *stmt;
  stmt_vec_info stmt_info;
  tree lhs;

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
      vect_analyze_slp_cost_1 (instance, child, prologue_cost_vec,
			       body_cost_vec, ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
	vect_model_store_cost (stmt_info, ncopies_for_cost, false,
			       vect_uninitialized_def,
			       node, prologue_cost_vec, body_cost_vec);
      else
	{
	  gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
	  if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
	    {
	      /* If the load is permuted then the alignment is determined by
		 the first group element not by the first scalar stmt DR.  */
	      stmt = GROUP_FIRST_ELEMENT (stmt_info);
	      stmt_info = vinfo_for_stmt (stmt);
	      /* Record the cost for the permutation.  */
	      record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
				stmt_info, 0, vect_body);
	      /* And adjust the number of loads performed.  */
	      unsigned nunits
		= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
	      ncopies_for_cost
		= (GROUP_SIZE (stmt_info) - GROUP_GAP (stmt_info)
		   + nunits - 1) / nunits;
	      ncopies_for_cost *= SLP_INSTANCE_UNROLLING_FACTOR (instance);
	    }
	  /* Record the cost for the vector loads.  */
	  vect_model_load_cost (stmt_info, ncopies_for_cost, false,
				node, prologue_cost_vec, body_cost_vec);

	  return;
	}
    }

  record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
		    stmt_info, 0, vect_body);
  if (SLP_TREE_TWO_OPERATORS (node))
    {
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
			stmt_info, 0, vect_body);
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
			stmt_info, 0, vect_body);
    }

  /* Push SLP node def-type to stmts.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);

  /* Scan operands and account for prologue cost of constants/externals.
     ??? This over-estimates cost for multiple uses and should be
     re-engineered.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      gimple *def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
	continue;
      if (vect_is_simple_use (op, stmt_info->vinfo, &def_stmt, &dt))
	{
	  /* Without looking at the actual initializer a vector of
	     constants can be implemented as load from the constant pool.
	     ??? We need to pass down stmt_info for a vector type
	     even if it points to the wrong stmt.  */
	  if (dt == vect_constant_def)
	    record_stmt_cost (prologue_cost_vec, 1, vector_load,
			      stmt_info, 0, vect_prologue);
	  else if (dt == vect_external_def)
	    record_stmt_cost (prologue_cost_vec, 1, vec_construct,
			      stmt_info, 0, vect_prologue);
	}
    }

  /* Restore stmt def-types.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;
}
/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (slp_instance instance, void *data)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_analyze_slp_cost ===\n");

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  slp_tree node = SLP_INSTANCE_TREE (instance);
  stmt_vec_info stmt_info = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
  /* Adjust the group_size by the vectorization factor which is always one
     for basic-block vectorization.  */
  if (STMT_VINFO_LOOP_VINFO (stmt_info))
    group_size *= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  /* For reductions look at a reduction operand in case the reduction
     operation is widening like DOT_PROD or SAD.  */
  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      switch (gimple_assign_rhs_code (stmt))
	{
	case DOT_PROD_EXPR:
	case SAD_EXPR:
	  nunits = TYPE_VECTOR_SUBPARTS (get_vectype_for_scalar_type
					  (TREE_TYPE (gimple_assign_rhs1 (stmt))));
	  break;
	default:;
	}
    }
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  vect_analyze_slp_cost_1 (instance, SLP_INSTANCE_TREE (instance),
			   &prologue_cost_vec, &body_cost_vec,
			   ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  */
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
			    si->misalign, vect_prologue);
    }

  /* Record the instance's instructions in the target cost model.  */
  FOR_EACH_VEC_ELT (body_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
			    si->misalign, vect_body);
    }

  prologue_cost_vec.release ();
  body_cost_vec.release ();
}
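
/* Worked example with hypothetical numbers: for GROUP_SIZE 4, a loop
   vectorization factor of 2 and NUNITS 4, group_size is adjusted to 8 and
   ncopies_for_cost = least_common_multiple (4, 8) / 4 = 2, i.e. two copies
   of each vector stmt are costed for the instance.  */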
/* Splits a group of stores, currently beginning at FIRST_STMT, into two groups:
   one (still beginning at FIRST_STMT) of size GROUP1_SIZE (also containing
   the first GROUP1_SIZE stmts, since stores are consecutive), the second
   containing the remainder.
   Return the first stmt in the second group.  */

static gimple *
vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
{
  stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
  gcc_assert (GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
  gcc_assert (group1_size > 0);
  int group2_size = GROUP_SIZE (first_vinfo) - group1_size;
  gcc_assert (group2_size > 0);
  GROUP_SIZE (first_vinfo) = group1_size;

  gimple *stmt = first_stmt;
  for (unsigned i = group1_size; i > 1; i--)
    {
      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
    }
  /* STMT is now the last element of the first group.  */
  gimple *group2 = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
  GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;

  GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
  for (stmt = group2; stmt; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
    {
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
    }

  /* For the second group, the GROUP_GAP is that before the original group,
     plus skipping over the first vector.  */
  GROUP_GAP (vinfo_for_stmt (group2))
    = GROUP_GAP (first_vinfo) + group1_size;

  /* GROUP_GAP of the first group now has to skip over the second group too.  */
  GROUP_GAP (first_vinfo) += group2_size;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
		     group1_size, group2_size);

  return group2;
}
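
/* A worked (hypothetical) example for the splitting above: a store group
   of size 6 with GROUP_GAP 0 split at group1_size 4 gives a first group of
   size 4 with GROUP_GAP 0 + 2 = 2 (it must now skip the second group) and
   a second group of size 2 with GROUP_GAP 0 + 4 = 4 (the gap before the
   original group plus the first group it skips).  */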
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (vec_info *vinfo,
			   gimple *stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple *next;
  unsigned int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple *> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
	{
	  scalar_type = TREE_TYPE (DR_REF (dr));
	  vectype = get_vectype_for_scalar_type (scalar_type);
	}
      else
	{
	  gcc_assert (is_a <loop_vec_info> (vinfo));
	  vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
	}

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (is_a <loop_vec_info> (vinfo));
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = as_a <loop_vec_info> (vinfo)->reductions.length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Build SLP failed: unsupported data-type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
  if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Build SLP failed: unrolling required in basic"
			 " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
	{
	  if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
	      && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
	    scalar_stmts.safe_push (
		  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
	  else
	    scalar_stmts.safe_push (next);
	  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	}
      /* Mark the first element of the reduction chain as reduction to properly
	 transform the node.  In the reduction analysis phase only the last
	 element of the chain is marked as reduction.  */
      if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple *> reductions = as_a <loop_vec_info> (vinfo)->reductions;
      for (i = 0; reductions.iterate (i, &next); i++)
	scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (vinfo, &node, group_size,
			   &max_nunits, &loads,
			   matches, &npermutes, NULL, max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
	unrolling_factor = least_common_multiple (max_nunits, group_size)
			   / group_size;

      if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Build SLP failed: unrolling required in basic"
			     " block SLP\n");
	  vect_free_slp_tree (node);
	  loads.release ();
	  return false;
	}

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
	{
	  vec<unsigned> load_permutation;
	  int j;
	  gimple *load, *first_stmt;
	  bool this_load_permuted = false;
	  load_permutation.create (group_size);
	  first_stmt = GROUP_FIRST_ELEMENT
	      (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
	    {
	      int load_place
		= vect_get_place_in_interleaving_chain (load, first_stmt);
	      gcc_assert (load_place != -1);
	      if (load_place != j)
		this_load_permuted = true;
	      load_permutation.safe_push (load_place);
	    }
	  if (!this_load_permuted
	      /* The load requires permutation when unrolling exposes
		 a gap either because the group is larger than the SLP
		 group-size or because there is a gap between the groups.  */
	      && (unrolling_factor == 1
		  || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
		      && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
	    {
	      load_permutation.release ();
	      continue;
	    }
	  SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
	  loads_permuted = true;
	}

      if (loads_permuted)
	{
	  if (!vect_supported_load_permutation_p (new_instance))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported load "
				   "permutation ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      vect_free_slp_instance (new_instance);
	      return false;
	    }
	}

      vinfo->slp_instances.safe_push (new_instance);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Final SLP tree for instance:\n");
	  vect_print_slp_tree (MSG_NOTE, vect_location, node);
	}

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  /* For basic block SLP, try to break the group up into multiples of the
     vector size.  */
  if (is_a <bb_vec_info> (vinfo)
      && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
      && STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
    {
      /* We consider breaking the group only on VF boundaries from the existing
	 start.  */
      for (i = 0; i < group_size; i++)
	if (!matches[i]) break;

      if (i >= nunits && i < group_size)
	{
	  /* Split into two groups at the first vector boundary before i.  */
	  gcc_assert ((nunits & (nunits - 1)) == 0);
	  unsigned group1_size = i & ~(nunits - 1);

	  gimple *rest = vect_split_slp_store_group (stmt, group1_size);
	  bool res = vect_analyze_slp_instance (vinfo, stmt, max_tree_size);
	  /* If the first non-match was in the middle of a vector,
	     skip the rest of that vector.  */
	  if (group1_size < i)
	    {
	      i = group1_size + nunits;
	      if (i < group_size)
		rest = vect_split_slp_store_group (rest, nunits);
	    }
	  if (i < group_size)
	    res |= vect_analyze_slp_instance (vinfo, rest, max_tree_size);
	  return res;
	}
      /* Even though the first vector did not all match, we might be able to SLP
	 (some) of the remainder.  FORNOW ignore this possibility.  */
    }

  return false;
}
/* Check whether there are stmts in the loop or block that can be vectorized
   using SLP.  Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)
{
  unsigned int i;
  gimple *first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (vinfo, first_element, max_tree_size))
      ok = true;

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    {
      if (loop_vinfo->reduction_chains.length () > 0)
	{
	  /* Find SLP sequences starting from reduction chains.  */
	  FOR_EACH_VEC_ELT (loop_vinfo->reduction_chains, i, first_element)
	    if (vect_analyze_slp_instance (vinfo, first_element,
					   max_tree_size))
	      ok = true;
	    else
	      return false;

	  /* Don't try to vectorize SLP reductions if reduction chain was
	     detected.  */
	  return ok;
	}

      /* Find SLP sequences starting from groups of reductions.  */
      if (loop_vinfo->reductions.length () > 1
	  && vect_analyze_slp_instance (vinfo, loop_vinfo->reductions[0],
					max_tree_size))
	ok = true;
    }

  return true;
}
1925 /* For each possible SLP instance decide whether to SLP it and calculate overall
1926 unrolling factor needed to SLP the loop. Return TRUE if decided to SLP at
1927 least one instance. */
1929 bool
1930 vect_make_slp_decision (loop_vec_info loop_vinfo)
1932 unsigned int i, unrolling_factor = 1;
1933 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1934 slp_instance instance;
1935 int decided_to_slp = 0;
1937 if (dump_enabled_p ())
1938 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
1939 "\n");
1941 FOR_EACH_VEC_ELT (slp_instances, i, instance)
1943 /* FORNOW: SLP if you can. */
1944 if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
1945 unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
1947 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1948 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1949 loop-based vectorization. Such stmts will be marked as HYBRID. */
1950 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
1951 decided_to_slp++;
1954 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
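/* The factor recorded above is the maximum over all instances, so a
   single unroll amount is valid for every SLP instance in the loop. */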
1956 if (decided_to_slp && dump_enabled_p ())
1957 dump_printf_loc (MSG_NOTE, vect_location,
1958 "Decided to SLP %d instances. Unrolling factor %d\n",
1959 decided_to_slp, unrolling_factor);
1961 return (decided_to_slp > 0);
1965 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1966 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
1968 static void
1969 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1971 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1972 imm_use_iterator imm_iter;
1973 gimple *use_stmt;
1974 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1975 slp_tree child;
1976 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
1977 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1978 int j;
1980 /* Propagate hybrid down the SLP tree. */
1981 if (stype == hybrid)
1982 ;
1983 else if (HYBRID_SLP_STMT (stmt_vinfo))
1984 stype = hybrid;
1985 else
1987 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
1988 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
1989 /* We always get the pattern stmt here, but for immediate
1990 uses we have to use the LHS of the original stmt. */
1991 gcc_checking_assert (!STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
1992 if (STMT_VINFO_RELATED_STMT (stmt_vinfo))
1993 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
1994 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
1995 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
1997 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
1998 continue;
1999 use_vinfo = vinfo_for_stmt (use_stmt);
2000 if (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2001 && STMT_VINFO_RELATED_STMT (use_vinfo))
2002 use_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (use_vinfo));
2003 if (!STMT_SLP_TYPE (use_vinfo)
2004 && (STMT_VINFO_RELEVANT (use_vinfo)
2005 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo)))
2006 && !(gimple_code (use_stmt) == GIMPLE_PHI
2007 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2009 if (dump_enabled_p ())
2011 dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
2012 "def in non-SLP stmt: ");
2013 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0);
2015 stype = hybrid;
2020 if (stype == hybrid
2021 && !HYBRID_SLP_STMT (stmt_vinfo))
2023 if (dump_enabled_p ())
2025 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2026 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2028 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2031 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2032 if (SLP_TREE_DEF_TYPE (child) != vect_external_def)
2033 vect_detect_hybrid_slp_stmts (child, i, stype);
2036 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2038 static tree
2039 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2041 walk_stmt_info *wi = (walk_stmt_info *)data;
2042 struct loop *loopp = (struct loop *)wi->info;
2044 if (wi->is_lhs)
2045 return NULL_TREE;
2047 if (TREE_CODE (*tp) == SSA_NAME
2048 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2050 gimple *def_stmt = SSA_NAME_DEF_STMT (*tp);
2051 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2052 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2054 if (dump_enabled_p ())
2056 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2057 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2059 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2063 return NULL_TREE;
2066 static tree
2067 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2068 walk_stmt_info *)
2070 /* If the stmt is in a SLP instance then this isn't a reason
2071 to mark use definitions in other SLP instances as hybrid. */
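/* Setting *HANDLED makes walk_gimple_stmt skip the operand walk
   (vect_detect_hybrid_slp_1) for this stmt, so only operands of stmts
   still classified as plain loop_vect get scanned. */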
2072 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2073 *handled = true;
2074 return NULL_TREE;
2077 /* Find stmts that must be both vectorized and SLPed. */
2079 void
2080 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2082 unsigned int i;
2083 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2084 slp_instance instance;
2086 if (dump_enabled_p ())
2087 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2088 "\n");
2090 /* First walk all pattern stmts in the loop and mark defs of their uses
2091 as hybrid, because immediate uses in pattern stmts are not recorded. */
2092 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2094 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2095 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2096 gsi_next (&gsi))
2098 gimple *stmt = gsi_stmt (gsi);
2099 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2100 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2102 walk_stmt_info wi;
2103 memset (&wi, 0, sizeof (wi));
2104 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2105 gimple_stmt_iterator gsi2
2106 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2107 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2108 vect_detect_hybrid_slp_1, &wi);
2109 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2110 vect_detect_hybrid_slp_2,
2111 vect_detect_hybrid_slp_1, &wi);
2116 /* Then walk the SLP instance trees marking stmts with uses in
2117 non-SLP stmts as hybrid, also propagating hybrid down the
2118 SLP tree, collecting the above info on-the-fly. */
2119 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2121 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2122 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2123 i, pure_slp);
2128 /* Create and initialize a new bb_vec_info struct for BB, as well as
2129 stmt_vec_info structs for all the stmts in it. */
2131 static bb_vec_info
2132 new_bb_vec_info (gimple_stmt_iterator region_begin,
2133 gimple_stmt_iterator region_end)
2135 basic_block bb = gsi_bb (region_begin);
2136 bb_vec_info res = NULL;
2137 gimple_stmt_iterator gsi;
2139 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2140 res->kind = vec_info::bb;
2141 BB_VINFO_BB (res) = bb;
2142 res->region_begin = region_begin;
2143 res->region_end = region_end;
2145 for (gsi = region_begin; gsi_stmt (gsi) != gsi_stmt (region_end);
2146 gsi_next (&gsi))
2148 gimple *stmt = gsi_stmt (gsi);
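/* A UID of zero tags the stmt as belonging to the region being
   analyzed; destroy_bb_vec_info resets it to -1 when the region
   info is destroyed. */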
2149 gimple_set_uid (stmt, 0);
2150 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
2153 BB_VINFO_GROUPED_STORES (res).create (10);
2154 BB_VINFO_SLP_INSTANCES (res).create (2);
2155 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2157 bb->aux = res;
2158 return res;
2162 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2163 stmts in the basic block. */
2165 static void
2166 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2168 vec<slp_instance> slp_instances;
2169 slp_instance instance;
2170 basic_block bb;
2171 gimple_stmt_iterator si;
2172 unsigned i;
2174 if (!bb_vinfo)
2175 return;
2177 bb = BB_VINFO_BB (bb_vinfo);
2179 for (si = bb_vinfo->region_begin;
2180 gsi_stmt (si) != gsi_stmt (bb_vinfo->region_end); gsi_next (&si))
2182 gimple *stmt = gsi_stmt (si);
2183 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2185 if (stmt_info)
2186 /* Free stmt_vec_info. */
2187 free_stmt_vec_info (stmt);
2189 /* Reset region marker. */
2190 gimple_set_uid (stmt, -1);
2193 vect_destroy_datarefs (bb_vinfo);
2194 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2195 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2196 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2197 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2198 vect_free_slp_instance (instance);
2199 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2200 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2201 free (bb_vinfo);
2202 bb->aux = NULL;
2206 /* Analyze statements contained in SLP tree node after recursively analyzing
2207 the subtree. Return TRUE if the operations are supported. */
2209 static bool
2210 vect_slp_analyze_node_operations (slp_tree node)
2212 bool dummy;
2213 int i, j;
2214 gimple *stmt;
2215 slp_tree child;
2217 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
2218 return true;
2220 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2221 if (!vect_slp_analyze_node_operations (child))
2222 return false;
2224 bool res = true;
2225 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2227 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2228 gcc_assert (stmt_info);
2229 gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
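/* vect_analyze_stmt ends up calling vect_is_simple_use on each operand,
   which reads the recorded def-type, so temporarily reflect invariant
   and external children in the operand stmt_vec_infos; this is restored
   right after the analysis below. */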
2231 /* Push SLP node def-type to stmt operands. */
2232 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2233 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
2234 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (child)[i]))
2235 = SLP_TREE_DEF_TYPE (child);
2236 res = vect_analyze_stmt (stmt, &dummy, node);
2237 /* Restore def-types. */
2238 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2239 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
2240 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (child)[i]))
2241 = vect_internal_def;
2242 if (! res)
2243 break;
2246 return res;
2250 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2251 operations are supported. */
2253 bool
2254 vect_slp_analyze_operations (vec<slp_instance> slp_instances, void *data)
2256 slp_instance instance;
2257 int i;
2259 if (dump_enabled_p ())
2260 dump_printf_loc (MSG_NOTE, vect_location,
2261 "=== vect_slp_analyze_operations ===\n");
2263 for (i = 0; slp_instances.iterate (i, &instance); )
2265 if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
2267 dump_printf_loc (MSG_NOTE, vect_location,
2268 "removing SLP instance operations starting from: ");
2269 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2270 SLP_TREE_SCALAR_STMTS
2271 (SLP_INSTANCE_TREE (instance))[0], 0);
2272 vect_free_slp_instance (instance);
2273 slp_instances.ordered_remove (i);
2275 else
2277 /* Compute the costs of the SLP instance. */
2278 vect_analyze_slp_cost (instance, data);
2279 i++;
2283 if (!slp_instances.length ())
2284 return false;
2286 return true;
2290 /* Compute the scalar cost of the SLP node NODE and its children
2291 and return it. Do not account defs that are marked in LIFE and
2292 update LIFE according to uses of NODE. */
2294 static unsigned
2295 vect_bb_slp_scalar_cost (basic_block bb,
2296 slp_tree node, vec<bool, va_heap> *life)
2298 unsigned scalar_cost = 0;
2299 unsigned i;
2300 gimple *stmt;
2301 slp_tree child;
2303 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2305 unsigned stmt_cost;
2306 ssa_op_iter op_iter;
2307 def_operand_p def_p;
2308 stmt_vec_info stmt_info;
2310 if ((*life)[i])
2311 continue;
2313 /* If there is a non-vectorized use of the defs then the scalar
2314 stmt is kept live in which case we do not account it or any
2315 required defs in the SLP children in the scalar cost. This
2316 way we make the vectorization more costly when compared to
2317 the scalar cost. */
2318 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2320 imm_use_iterator use_iter;
2321 gimple *use_stmt;
2322 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2323 if (!is_gimple_debug (use_stmt)
2324 && (! vect_stmt_in_region_p (vinfo_for_stmt (stmt)->vinfo,
2325 use_stmt)
2326 || ! PURE_SLP_STMT (vinfo_for_stmt (use_stmt))))
2328 (*life)[i] = true;
2329 BREAK_FROM_IMM_USE_STMT (use_iter);
2332 if ((*life)[i])
2333 continue;
2335 stmt_info = vinfo_for_stmt (stmt);
2336 if (STMT_VINFO_DATA_REF (stmt_info))
2338 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2339 stmt_cost = vect_get_stmt_cost (scalar_load);
2340 else
2341 stmt_cost = vect_get_stmt_cost (scalar_store);
2343 else
2344 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2346 scalar_cost += stmt_cost;
2349 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2350 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
2351 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2353 return scalar_cost;
2356 /* Check if vectorization of the basic block is profitable. */
2358 static bool
2359 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2361 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2362 slp_instance instance;
2363 int i;
2364 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2365 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2367 /* Calculate scalar cost. */
2368 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2370 auto_vec<bool, 20> life;
2371 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2372 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2373 SLP_INSTANCE_TREE (instance),
2374 &life);
2377 /* Complete the target-specific cost calculation. */
2378 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2379 &vec_inside_cost, &vec_epilogue_cost);
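/* The costs outside the vectorized region are just the prologue and
   the epilogue; there is no loop here to amortize them over. */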
2381 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2383 if (dump_enabled_p ())
2385 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2386 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2387 vec_inside_cost);
2388 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2389 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2390 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2393 /* Vectorization is profitable if its cost is less than the cost of the
2394 scalar version. Note that we err on the vector side for equal cost because
2395 the cost estimate is otherwise quite pessimistic (constant uses are
2396 free on the scalar side but cost a load on the vector side for
2397 example). */
2398 if (vec_outside_cost + vec_inside_cost > scalar_cost)
2399 return false;
2401 return true;
2404 /* Check if the basic block can be vectorized. Returns a bb_vec_info
2405 if so and sets fatal to true if failure is independent of
2406 current_vector_size. */
2408 static bb_vec_info
2409 vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
2410 gimple_stmt_iterator region_end,
2411 vec<data_reference_p> datarefs, int n_stmts,
2412 bool &fatal)
2414 bb_vec_info bb_vinfo;
2415 slp_instance instance;
2416 int i;
2417 int min_vf = 2;
2419 /* The first group of checks is independent of the vector size. */
2420 fatal = true;
2422 if (n_stmts > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2424 if (dump_enabled_p ())
2425 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2426 "not vectorized: too many instructions in "
2427 "basic block.\n");
2428 free_data_refs (datarefs);
2429 return NULL;
2432 bb_vinfo = new_bb_vec_info (region_begin, region_end);
2433 if (!bb_vinfo)
2434 return NULL;
2436 BB_VINFO_DATAREFS (bb_vinfo) = datarefs;
2438 /* Analyze the data references. */
2440 if (!vect_analyze_data_refs (bb_vinfo, &min_vf))
2442 if (dump_enabled_p ())
2443 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2444 "not vectorized: unhandled data-ref in basic "
2445 "block.\n");
2447 destroy_bb_vec_info (bb_vinfo);
2448 return NULL;
2451 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2453 if (dump_enabled_p ())
2454 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2455 "not vectorized: not enough data-refs in "
2456 "basic block.\n");
2458 destroy_bb_vec_info (bb_vinfo);
2459 return NULL;
2462 if (!vect_analyze_data_ref_accesses (bb_vinfo))
2464 if (dump_enabled_p ())
2465 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2466 "not vectorized: unhandled data access in "
2467 "basic block.\n");
2469 destroy_bb_vec_info (bb_vinfo);
2470 return NULL;
2473 /* If there are no grouped stores in the region there is no need
2474 to continue with pattern recog as vect_analyze_slp will fail
2475 anyway. */
2476 if (bb_vinfo->grouped_stores.is_empty ())
2478 if (dump_enabled_p ())
2479 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2480 "not vectorized: no grouped stores in "
2481 "basic block.\n");
2483 destroy_bb_vec_info (bb_vinfo);
2484 return NULL;
2487 /* The rest of the analysis below depends on the vector size in some way. */
2488 fatal = false;
2490 vect_pattern_recog (bb_vinfo);
2492 /* Check the SLP opportunities in the basic block, analyze and build SLP
2493 trees. */
2494 if (!vect_analyze_slp (bb_vinfo, n_stmts))
2496 if (dump_enabled_p ())
2498 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2499 "Failed to SLP the basic block.\n");
2500 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2501 "not vectorized: failed to find SLP opportunities "
2502 "in basic block.\n");
2505 destroy_bb_vec_info (bb_vinfo);
2506 return NULL;
2509 /* Analyze and verify the alignment of data references and the
2510 dependence in the SLP instances. */
2511 for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); )
2513 if (! vect_slp_analyze_and_verify_instance_alignment (instance)
2514 || ! vect_slp_analyze_instance_dependence (instance))
2516 dump_printf_loc (MSG_NOTE, vect_location,
2517 "removing SLP instance operations starting from: ");
2518 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2519 SLP_TREE_SCALAR_STMTS
2520 (SLP_INSTANCE_TREE (instance))[0], 0);
2521 vect_free_slp_instance (instance);
2522 BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
2523 continue;
2526 /* Mark all the statements that we want to vectorize as pure SLP and
2527 relevant. */
2528 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2529 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2531 i++;
2533 if (! BB_VINFO_SLP_INSTANCES (bb_vinfo).length ())
2535 destroy_bb_vec_info (bb_vinfo);
2536 return NULL;
2539 if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo),
2540 BB_VINFO_TARGET_COST_DATA (bb_vinfo)))
2542 if (dump_enabled_p ())
2543 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2544 "not vectorized: bad operation in basic block.\n");
2546 destroy_bb_vec_info (bb_vinfo);
2547 return NULL;
2550 /* Cost model: check if the vectorization is worthwhile. */
2551 if (!unlimited_cost_model (NULL)
2552 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2554 if (dump_enabled_p ())
2555 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2556 "not vectorized: vectorization is not "
2557 "profitable.\n");
2559 destroy_bb_vec_info (bb_vinfo);
2560 return NULL;
2563 if (dump_enabled_p ())
2564 dump_printf_loc (MSG_NOTE, vect_location,
2565 "Basic block will be vectorized using SLP\n");
2567 return bb_vinfo;
2571 /* Main entry for the BB vectorizer. Analyze and transform BB, returns
2572 true if anything in the basic-block was vectorized. */
2574 bool
2575 vect_slp_bb (basic_block bb)
2577 bb_vec_info bb_vinfo;
2578 gimple_stmt_iterator gsi;
2579 unsigned int vector_sizes;
2580 bool any_vectorized = false;
2582 if (dump_enabled_p ())
2583 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2585 /* Autodetect first vector size we try. */
2586 current_vector_size = 0;
2587 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2589 gsi = gsi_start_bb (bb);
2591 while (1)
2593 if (gsi_end_p (gsi))
2594 break;
2596 gimple_stmt_iterator region_begin = gsi;
2597 vec<data_reference_p> datarefs = vNULL;
2598 int insns = 0;
2600 for (; !gsi_end_p (gsi); gsi_next (&gsi))
2602 gimple *stmt = gsi_stmt (gsi);
2603 if (is_gimple_debug (stmt))
2604 continue;
2605 insns++;
2607 if (gimple_location (stmt) != UNKNOWN_LOCATION)
2608 vect_location = gimple_location (stmt);
2610 if (!find_data_references_in_stmt (NULL, stmt, &datarefs))
2611 break;
2614 /* Skip leading unhandled stmts. */
2615 if (gsi_stmt (region_begin) == gsi_stmt (gsi))
2617 gsi_next (&gsi);
2618 continue;
2621 gimple_stmt_iterator region_end = gsi;
2623 bool vectorized = false;
2624 bool fatal = false;
2625 bb_vinfo = vect_slp_analyze_bb_1 (region_begin, region_end,
2626 datarefs, insns, fatal);
2627 if (bb_vinfo
2628 && dbg_cnt (vect_slp))
2630 if (dump_enabled_p ())
2631 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB part\n");
2633 vect_schedule_slp (bb_vinfo);
2635 if (dump_enabled_p ())
2636 dump_printf_loc (MSG_NOTE, vect_location,
2637 "basic block part vectorized\n");
2639 destroy_bb_vec_info (bb_vinfo);
2641 vectorized = true;
2643 else
2644 destroy_bb_vec_info (bb_vinfo);
2646 any_vectorized |= vectorized;
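/* VECTOR_SIZES is a bitmask of the vector sizes the target supports
   (from targetm.vectorize.autovectorize_vector_sizes); clear the bit
   of the size just tried. */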
2648 vector_sizes &= ~current_vector_size;
2649 if (vectorized
2650 || vector_sizes == 0
2651 || current_vector_size == 0
2652 /* If vect_slp_analyze_bb_1 signaled that analysis for all
2653 vector sizes will fail do not bother iterating. */
2654 || fatal)
2656 if (gsi_end_p (region_end))
2657 break;
2659 /* Skip the unhandled stmt. */
2660 gsi_next (&gsi);
2662 /* And reset vector sizes. */
2663 current_vector_size = 0;
2664 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2666 else
2668 /* Try the largest remaining vector size, i.e. the highest bit still set in the size bitmask. */
2669 current_vector_size = 1 << floor_log2 (vector_sizes);
2670 if (dump_enabled_p ())
2671 dump_printf_loc (MSG_NOTE, vect_location,
2672 "***** Re-trying analysis with "
2673 "vector size %d\n", current_vector_size);
2675 /* Start over. */
2676 gsi = region_begin;
2680 return any_vectorized;
2684 /* Return true if the vector type of the boolean constant that is the
2685 OPNUM operand of statement STMT is a boolean vector. */
2687 static bool
2688 vect_mask_constant_operand_p (gimple *stmt, int opnum)
2690 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2691 enum tree_code code = gimple_expr_code (stmt);
2692 tree op, vectype;
2693 gimple *def_stmt;
2694 enum vect_def_type dt;
2696 /* For comparisons and COND_EXPR the type is chosen depending
2697 on the other comparison operand. */
2698 if (TREE_CODE_CLASS (code) == tcc_comparison)
2700 if (opnum)
2701 op = gimple_assign_rhs1 (stmt);
2702 else
2703 op = gimple_assign_rhs2 (stmt);
2705 if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
2706 &dt, &vectype))
2707 gcc_unreachable ();
2709 return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
2712 if (code == COND_EXPR)
2714 tree cond = gimple_assign_rhs1 (stmt);
2716 if (TREE_CODE (cond) == SSA_NAME)
2717 return false;
2719 if (opnum)
2720 op = TREE_OPERAND (cond, 1);
2721 else
2722 op = TREE_OPERAND (cond, 0);
2724 if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
2725 &dt, &vectype))
2726 gcc_unreachable ();
2728 return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
2731 return VECTOR_BOOLEAN_TYPE_P (STMT_VINFO_VECTYPE (stmt_vinfo));
2735 /* For constant and loop invariant defs of SLP_NODE this function returns
2736 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2737 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2738 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2739 REDUC_INDEX is the index of the reduction operand in the statements, unless
2740 it is -1. */
2742 static void
2743 vect_get_constant_vectors (tree op, slp_tree slp_node,
2744 vec<tree> *vec_oprnds,
2745 unsigned int op_num, unsigned int number_of_vectors,
2746 int reduc_index)
2748 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2749 gimple *stmt = stmts[0];
2750 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2751 unsigned nunits;
2752 tree vec_cst;
2753 tree *elts;
2754 unsigned j, number_of_places_left_in_vector;
2755 tree vector_type;
2756 tree vop;
2757 int group_size = stmts.length ();
2758 unsigned int vec_num, i;
2759 unsigned number_of_copies = 1;
2760 vec<tree> voprnds;
2761 voprnds.create (number_of_vectors);
2762 bool constant_p, is_store;
2763 tree neutral_op = NULL;
2764 enum tree_code code = gimple_expr_code (stmt);
2765 gimple *def_stmt;
2766 struct loop *loop;
2767 gimple_seq ctor_seq = NULL;
2769 /* Check if vector type is a boolean vector. */
2770 if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
2771 && vect_mask_constant_operand_p (stmt, op_num))
2772 vector_type
2773 = build_same_sized_truth_vector_type (STMT_VINFO_VECTYPE (stmt_vinfo));
2774 else
2775 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2776 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2778 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2779 && reduc_index != -1)
2781 op_num = reduc_index;
2782 op = gimple_op (stmt, op_num + 1);
2783 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2784 we need either neutral operands or the original operands. See
2785 get_initial_def_for_reduction() for details. */
2786 switch (code)
2788 case WIDEN_SUM_EXPR:
2789 case DOT_PROD_EXPR:
2790 case SAD_EXPR:
2791 case PLUS_EXPR:
2792 case MINUS_EXPR:
2793 case BIT_IOR_EXPR:
2794 case BIT_XOR_EXPR:
2795 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2796 neutral_op = build_real (TREE_TYPE (op), dconst0);
2797 else
2798 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2800 break;
2802 case MULT_EXPR:
2803 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2804 neutral_op = build_real (TREE_TYPE (op), dconst1);
2805 else
2806 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2808 break;
2810 case BIT_AND_EXPR:
2811 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2812 break;
2814 /* For MIN/MAX we don't have an easy neutral operand but
2815 the initial values can be used fine here. Only for
2816 a reduction chain we have to force a neutral element. */
2817 case MAX_EXPR:
2818 case MIN_EXPR:
2819 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2820 neutral_op = NULL;
2821 else
2823 def_stmt = SSA_NAME_DEF_STMT (op);
2824 loop = (gimple_bb (stmt))->loop_father;
2825 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2826 loop_preheader_edge (loop));
2828 break;
2830 default:
2831 gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
2832 neutral_op = NULL;
2836 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2838 is_store = true;
2839 op = gimple_assign_rhs1 (stmt);
2841 else
2842 is_store = false;
2844 gcc_assert (op);
2846 if (CONSTANT_CLASS_P (op))
2847 constant_p = true;
2848 else
2849 constant_p = false;
2851 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2852 created vectors. It is greater than 1 if unrolling is performed.
2854 For example, we have two scalar operands, s1 and s2 (e.g., group of
2855 strided accesses of size two), while NUNITS is four (i.e., four scalars
2856 of this type can be packed in a vector). The output vector will contain
2857 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2858 will be 2).
2860 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2861 containing the operands.
2863 For example, NUNITS is four as before, and the group size is 8
2864 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2865 {s5, s6, s7, s8}. */
2867 number_of_copies = nunits * number_of_vectors / group_size;
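/* E.g. in the first example above: nunits == 4, group_size == 2 and a
   single vector to create give number_of_copies == 4 * 1 / 2 == 2. */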
2869 number_of_places_left_in_vector = nunits;
2870 elts = XALLOCAVEC (tree, nunits);
2871 bool place_after_defs = false;
2872 for (j = 0; j < number_of_copies; j++)
2874 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2876 if (is_store)
2877 op = gimple_assign_rhs1 (stmt);
2878 else
2880 switch (code)
2882 case COND_EXPR:
2884 tree cond = gimple_assign_rhs1 (stmt);
2885 if (TREE_CODE (cond) == SSA_NAME)
2886 op = gimple_op (stmt, op_num + 1);
2887 else if (op_num == 0 || op_num == 1)
2888 op = TREE_OPERAND (cond, op_num);
2889 else
2891 if (op_num == 2)
2892 op = gimple_assign_rhs2 (stmt);
2893 else
2894 op = gimple_assign_rhs3 (stmt);
2897 break;
2899 case CALL_EXPR:
2900 op = gimple_call_arg (stmt, op_num);
2901 break;
2903 case LSHIFT_EXPR:
2904 case RSHIFT_EXPR:
2905 case LROTATE_EXPR:
2906 case RROTATE_EXPR:
2907 op = gimple_op (stmt, op_num + 1);
2908 /* Unlike the other binary operators, shifts/rotates have
2909 the shift count being int, instead of the same type as
2910 the lhs, so make sure the scalar is the right type if
2911 we are dealing with vectors of
2912 long long/long/short/char. */
2913 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2914 op = fold_convert (TREE_TYPE (vector_type), op);
2915 break;
2917 default:
2918 op = gimple_op (stmt, op_num + 1);
2919 break;
2923 if (reduc_index != -1)
2925 loop = (gimple_bb (stmt))->loop_father;
2926 def_stmt = SSA_NAME_DEF_STMT (op);
2928 gcc_assert (loop);
2930 /* Get the def before the loop. In reduction chain we have only
2931 one initial value. */
2932 if ((j != (number_of_copies - 1)
2933 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2934 && i != 0))
2935 && neutral_op)
2936 op = neutral_op;
2937 else
2938 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2939 loop_preheader_edge (loop));
2942 /* Create 'vect_ = {op0,op1,...,opn}'. */
2943 number_of_places_left_in_vector--;
2944 tree orig_op = op;
2945 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2947 if (CONSTANT_CLASS_P (op))
2949 if (VECTOR_BOOLEAN_TYPE_P (vector_type))
2951 /* Can't use VIEW_CONVERT_EXPR for booleans because
2952 of possibly different sizes of scalar value and
2953 vector element. */
2954 if (integer_zerop (op))
2955 op = build_int_cst (TREE_TYPE (vector_type), 0);
2956 else if (integer_onep (op))
2957 op = build_int_cst (TREE_TYPE (vector_type), 1);
2958 else
2959 gcc_unreachable ();
2961 else
2962 op = fold_unary (VIEW_CONVERT_EXPR,
2963 TREE_TYPE (vector_type), op);
2964 gcc_assert (op && CONSTANT_CLASS_P (op));
2966 else
2968 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2969 gimple *init_stmt;
2970 if (VECTOR_BOOLEAN_TYPE_P (vector_type))
2972 gcc_assert (fold_convertible_p (TREE_TYPE (vector_type),
2973 op));
2974 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, op);
2976 else if (fold_convertible_p (TREE_TYPE (vector_type), op))
2977 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, op);
2978 else
2980 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
2981 op);
2982 init_stmt
2983 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR,
2984 op);
2986 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2987 op = new_temp;
2990 elts[number_of_places_left_in_vector] = op;
2991 if (!CONSTANT_CLASS_P (op))
2992 constant_p = false;
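/* An operand defined by an SSA name in the region's own basic block
   cannot be hoisted before the region; note that, so the constructor
   is emitted after the defs instead (PLACE_AFTER_DEFS below). */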
2993 if (TREE_CODE (orig_op) == SSA_NAME
2994 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2995 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2996 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2997 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2998 place_after_defs = true;
3000 if (number_of_places_left_in_vector == 0)
3002 number_of_places_left_in_vector = nunits;
3004 if (constant_p)
3005 vec_cst = build_vector (vector_type, elts);
3006 else
3008 vec<constructor_elt, va_gc> *v;
3009 unsigned k;
3010 vec_alloc (v, nunits);
3011 for (k = 0; k < nunits; ++k)
3012 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
3013 vec_cst = build_constructor (vector_type, v);
3015 tree init;
3016 gimple_stmt_iterator gsi;
3017 if (place_after_defs)
3019 gsi = gsi_for_stmt
3020 (vect_find_last_scalar_stmt_in_slp (slp_node));
3021 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
3023 else
3024 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
3025 if (ctor_seq != NULL)
3027 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
3028 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
3029 GSI_SAME_STMT);
3030 ctor_seq = NULL;
3032 voprnds.quick_push (init);
3033 place_after_defs = false;
3038 /* Since the vectors were created in reverse order, reverse them here
3039 to restore the original order. */
3040 vec_num = voprnds.length ();
3041 for (j = vec_num; j != 0; j--)
3043 vop = voprnds[j - 1];
3044 vec_oprnds->quick_push (vop);
3047 voprnds.release ();
3049 /* If VF is greater than the unrolling factor needed for the SLP
3050 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
3051 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
3052 to replicate the vectors. */
3053 while (number_of_vectors > vec_oprnds->length ())
3055 tree neutral_vec = NULL;
3057 if (neutral_op)
3059 if (!neutral_vec)
3060 neutral_vec = build_vector_from_val (vector_type, neutral_op);
3062 vec_oprnds->quick_push (neutral_vec);
3064 else
3066 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
3067 vec_oprnds->quick_push (vop);
3073 /* Get the vectorized definitions from SLP_NODE, which contains the
3074 corresponding vectorized def-stmts. */
3076 static void
3077 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
3079 tree vec_oprnd;
3080 gimple *vec_def_stmt;
3081 unsigned int i;
3083 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
3085 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
3087 gcc_assert (vec_def_stmt);
3088 vec_oprnd = gimple_get_lhs (vec_def_stmt);
3089 vec_oprnds->quick_push (vec_oprnd);
3094 /* Get vectorized definitions for SLP_NODE.
3095 If the scalar definitions are loop invariants or constants, collect them and
3096 call vect_get_constant_vectors() to create vector stmts.
3097 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
3098 must be stored in the corresponding child of SLP_NODE, and we call
3099 vect_get_slp_vect_defs () to retrieve them. */
3101 void
3102 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
3103 vec<vec<tree> > *vec_oprnds, int reduc_index)
3105 gimple *first_stmt;
3106 int number_of_vects = 0, i;
3107 unsigned int child_index = 0;
3108 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
3109 slp_tree child = NULL;
3110 vec<tree> vec_defs;
3111 tree oprnd;
3112 bool vectorized_defs;
3114 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
3115 FOR_EACH_VEC_ELT (ops, i, oprnd)
3117 /* For each operand we check if it has vectorized definitions in a child
3118 node or we need to create them (for invariants and constants). We
3119 check if the LHS of the first stmt of the next child matches OPRND.
3120 If it does, we found the correct child. Otherwise, we call
3121 vect_get_constant_vectors (), and do not advance CHILD_INDEX, in order
3122 to check this child node for the next operand. */
3123 vectorized_defs = false;
3124 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
3126 child = SLP_TREE_CHILDREN (slp_node)[child_index];
3128 /* We have to check both pattern and original def, if available. */
3129 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
3131 gimple *first_def = SLP_TREE_SCALAR_STMTS (child)[0];
3132 gimple *related
3133 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
3135 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
3136 || (related
3137 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
3139 /* The number of vector defs is determined by the number of
3140 vector statements in the node from which we get those
3141 statements. */
3142 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
3143 vectorized_defs = true;
3144 child_index++;
3147 else
3148 child_index++;
3151 if (!vectorized_defs)
3153 if (i == 0)
3155 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3156 /* Number of vector stmts was calculated according to LHS in
3157 vect_schedule_slp_instance (), fix it by replacing LHS with
3158 RHS, if necessary. See vect_get_smallest_scalar_type () for
3159 details. */
3160 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3161 &rhs_size_unit);
3162 if (rhs_size_unit != lhs_size_unit)
3164 number_of_vects *= rhs_size_unit;
3165 number_of_vects /= lhs_size_unit;
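/* E.g. short stores fed by int operands (lhs_size_unit == 2,
   rhs_size_unit == 4) need twice as many operand vectors as was
   computed from the lhs. */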
3170 /* Allocate memory for vectorized defs. */
3171 vec_defs = vNULL;
3172 vec_defs.create (number_of_vects);
3174 /* For reduction defs we call vect_get_constant_vectors (), since we are
3175 looking for initial loop invariant values. */
3176 if (vectorized_defs && reduc_index == -1)
3177 /* The defs are already vectorized. */
3178 vect_get_slp_vect_defs (child, &vec_defs);
3179 else
3180 /* Build vectors from scalar defs. */
3181 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3182 number_of_vects, reduc_index);
3184 vec_oprnds->quick_push (vec_defs);
3186 /* For reductions, we only need initial values. */
3187 if (reduc_index != -1)
3188 return;
3193 /* Create NCOPIES permutation statements using the vector permute mask MASK
3194 (or the identity transform if MASK is NULL_TREE) and two input vectors placed
3195 in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy, then
3196 shifting by a stride of DR_CHAIN elements for every further copy
3197 (the input stride is the length of DR_CHAIN divided by the number of copies;
3198 the output stride is the number of vectorized stmts of NODE so divided).
3199 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
3200 the created stmts must be inserted. */
3202 static inline void
3203 vect_create_mask_and_perm (gimple *stmt,
3204 tree mask, int first_vec_indx, int second_vec_indx,
3205 gimple_stmt_iterator *gsi, slp_tree node,
3206 tree vectype, vec<tree> dr_chain,
3207 int ncopies, int vect_stmts_counter)
3209 tree perm_dest;
3210 gimple *perm_stmt = NULL;
3211 int i, stride_in, stride_out;
3212 tree first_vec, second_vec, data_ref;
3214 stride_out = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
3215 stride_in = dr_chain.length () / ncopies;
3217 /* Initialize the vect stmts of NODE to properly insert the generated
3218 stmts later. */
3219 for (i = SLP_TREE_VEC_STMTS (node).length ();
3220 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3221 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3223 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3224 for (i = 0; i < ncopies; i++)
3226 first_vec = dr_chain[first_vec_indx];
3227 second_vec = dr_chain[second_vec_indx];
3229 /* Generate the permute statement if necessary. */
3230 if (mask)
3232 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3233 first_vec, second_vec, mask);
3234 data_ref = make_ssa_name (perm_dest, perm_stmt);
3235 gimple_set_lhs (perm_stmt, data_ref);
3236 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3238 else
3239 /* If mask was NULL_TREE generate the requested identity transform. */
3240 perm_stmt = SSA_NAME_DEF_STMT (first_vec);
3242 /* Store the vector statement in NODE. */
3243 SLP_TREE_VEC_STMTS (node)[stride_out * i + vect_stmts_counter]
3244 = perm_stmt;
3246 first_vec_indx += stride_in;
3247 second_vec_indx += stride_in;
3252 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3253 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3254 permute statements for the SLP node NODE of the SLP instance
3255 SLP_NODE_INSTANCE. */
3257 bool
3258 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3259 gimple_stmt_iterator *gsi, int vf,
3260 slp_instance slp_node_instance, bool analyze_only)
3262 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3263 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3264 tree mask_element_type = NULL_TREE, mask_type;
3265 int nunits, vec_index = 0;
3266 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3267 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3268 int unroll_factor, mask_element, ncopies;
3269 unsigned char *mask;
3270 machine_mode mode;
3272 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3273 return false;
3275 stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
3277 mode = TYPE_MODE (vectype);
3279 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3280 same size as the vector element being permuted. */
3281 mask_element_type = lang_hooks.types.type_for_mode
3282 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3283 mask_type = get_vectype_for_scalar_type (mask_element_type);
3284 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3285 mask = XALLOCAVEC (unsigned char, nunits);
3286 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3288 /* The number of copies is determined by the final vectorization factor
3289 relative to the SLP_NODE_INSTANCE unrolling factor. */
3290 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3292 /* Generate permutation masks for every NODE. Number of masks for each NODE
3293 is equal to GROUP_SIZE.
3294 E.g., we have a group of three nodes with three loads from the same
3295 location in each node, and the vector size is 4. I.e., we have an
3296 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3297 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3298 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3301 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3302 The last mask is illegal since we assume two operands for permute
3303 operation, and the mask element values can't be outside that range.
3304 Hence, the last mask must be converted into {2,5,5,5}.
3305 For the first two permutations we need the first and the second input
3306 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3307 we need the second and the third vectors: {b1,c1,a2,b2} and
3308 {c2,a3,b3,c3}. */
3310 int vect_stmts_counter = 0;
3311 int index = 0;
3312 int first_vec_index = -1;
3313 int second_vec_index = -1;
3314 bool noop_p = true;
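/* For every unrolled copy J and scalar stmt K compute the source
   element index: the load permutation gives K's position within the
   load group, and each copy advances by one whole group. */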
3316 for (int j = 0; j < unroll_factor; j++)
3318 for (int k = 0; k < group_size; k++)
3320 int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
3321 + j * STMT_VINFO_GROUP_SIZE (stmt_info));
3322 vec_index = i / nunits;
3323 mask_element = i % nunits;
3324 if (vec_index == first_vec_index
3325 || first_vec_index == -1)
3327 first_vec_index = vec_index;
3329 else if (vec_index == second_vec_index
3330 || second_vec_index == -1)
3332 second_vec_index = vec_index;
3333 mask_element += nunits;
3335 else
3337 if (dump_enabled_p ())
3339 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3340 "permutation requires at "
3341 "least three vectors ");
3342 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3343 stmt, 0);
3344 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3346 return false;
3349 gcc_assert (mask_element >= 0
3350 && mask_element < 2 * nunits);
3351 if (mask_element != index)
3352 noop_p = false;
3353 mask[index++] = mask_element;
3355 if (index == nunits)
3357 if (! noop_p
3358 && ! can_vec_perm_p (mode, false, mask))
3360 if (dump_enabled_p ())
3362 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3363 vect_location,
3364 "unsupported vect permute { ");
3365 for (i = 0; i < nunits; ++i)
3366 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ", mask[i]);
3367 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3369 return false;
3372 if (!analyze_only)
3374 tree mask_vec = NULL_TREE;
3376 if (! noop_p)
3378 tree *mask_elts = XALLOCAVEC (tree, nunits);
3379 for (int l = 0; l < nunits; ++l)
3380 mask_elts[l] = build_int_cst (mask_element_type,
3381 mask[l]);
3382 mask_vec = build_vector (mask_type, mask_elts);
3385 if (second_vec_index == -1)
3386 second_vec_index = first_vec_index;
3387 vect_create_mask_and_perm (stmt, mask_vec, first_vec_index,
3388 second_vec_index,
3389 gsi, node, vectype, dr_chain,
3390 ncopies, vect_stmts_counter++);
3393 index = 0;
3394 first_vec_index = -1;
3395 second_vec_index = -1;
3396 noop_p = true;
3401 return true;
3406 /* Vectorize SLP instance tree in postorder. */
3408 static bool
3409 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3410 unsigned int vectorization_factor)
3412 gimple *stmt;
3413 bool grouped_store, is_store;
3414 gimple_stmt_iterator si;
3415 stmt_vec_info stmt_info;
3416 unsigned int vec_stmts_size, nunits, group_size;
3417 tree vectype;
3418 int i, j;
3419 slp_tree child;
3421 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
3422 return false;
3424 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3425 vect_schedule_slp_instance (child, instance, vectorization_factor);
3427 /* Push SLP node def-type to stmts. */
3428 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3429 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
3430 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
3431 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);
3433 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3434 stmt_info = vinfo_for_stmt (stmt);
3436 /* VECTYPE is the type of the destination. */
3437 vectype = STMT_VINFO_VECTYPE (stmt_info);
3438 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3439 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3441 /* For each SLP instance calculate number of vector stmts to be created
3442 for the scalar stmts in each node of the SLP tree. Number of vector
3443 elements in one vector iteration is the number of scalar elements in
3444 one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
3445 size.
3446 Unless this is a SLP reduction in which case the number of vector
3447 stmts is equal to the number of vector stmts of the children. */
3448 if (GROUP_FIRST_ELEMENT (stmt_info)
3449 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
3450 vec_stmts_size = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
3451 else
3452 vec_stmts_size = (vectorization_factor * group_size) / nunits;
3454 if (!SLP_TREE_VEC_STMTS (node).exists ())
3456 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3457 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3460 if (dump_enabled_p ())
3462 dump_printf_loc (MSG_NOTE,vect_location,
3463 "------>vectorizing SLP node starting from: ");
3464 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3465 dump_printf (MSG_NOTE, "\n");
3468 /* Vectorized stmts go before the last scalar stmt which is where
3469 all uses are ready. */
3470 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3472 /* Mark the first element of the reduction chain as reduction to properly
3473 transform the node. In the analysis phase only the last element of the
3474 chain is marked as reduction. */
3475 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3476 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3478 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3479 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3482 /* Handle two-operation SLP nodes by vectorizing the group with
3483 both operations and then performing a merge. */
3484 if (SLP_TREE_TWO_OPERATORS (node))
3486 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3487 enum tree_code ocode;
3488 gimple *ostmt;
3489 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
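/* MASK records, for each scalar stmt of the group, which of the two
   operations it performs: 0 for CODE0, 1 for the other code (OCODE). */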
3490 bool allsame = true;
3491 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3492 if (gimple_assign_rhs_code (ostmt) != code0)
3494 mask[i] = 1;
3495 allsame = false;
3496 ocode = gimple_assign_rhs_code (ostmt);
3498 else
3499 mask[i] = 0;
3500 if (!allsame)
3502 vec<gimple *> v0;
3503 vec<gimple *> v1;
3504 unsigned j;
3505 tree tmask = NULL_TREE;
3506 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3507 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3508 SLP_TREE_VEC_STMTS (node).truncate (0);
3509 gimple_assign_set_rhs_code (stmt, ocode);
3510 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3511 gimple_assign_set_rhs_code (stmt, code0);
3512 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3513 SLP_TREE_VEC_STMTS (node).truncate (0);
3514 tree meltype = build_nonstandard_integer_type
3515 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3516 tree mvectype = get_same_sized_vectype (meltype, vectype);
3517 unsigned k = 0, l;
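/* Build a merge mask per output vector: lane L takes element L of the
   CODE0 result (v0) when mask[k] is 0, and element L of the OCODE
   result (v1) when it is 1, since VEC_PERM_EXPR indices of
   NUNITS + L select from the second input. */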
3518 for (j = 0; j < v0.length (); ++j)
3520 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3521 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3523 if (k >= group_size)
3524 k = 0;
3525 melts[l] = build_int_cst
3526 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3528 tmask = build_vector (mvectype, melts);
3530 /* ??? Not all targets support a VEC_PERM_EXPR with a
3531 constant mask that would translate to a vec_merge RTX
3532 (with their vec_perm_const_ok). We can either not
3533 vectorize in that case or let veclower do its job.
3534 Unfortunately that isn't too great and at least for
3535 plus/minus we'd eventually like to match targets'
3536 vector addsub instructions. */
3537 gimple *vstmt;
3538 vstmt = gimple_build_assign (make_ssa_name (vectype),
3539 VEC_PERM_EXPR,
3540 gimple_assign_lhs (v0[j]),
3541 gimple_assign_lhs (v1[j]), tmask);
3542 vect_finish_stmt_generation (stmt, vstmt, &si);
3543 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3545 v0.release ();
3546 v1.release ();
3547 return false;
3550 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3552 /* Restore stmt def-types. */
3553 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3554 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
3555 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
3556 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;
3558 return is_store;
3561 /* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
3562 For loop vectorization this is done in vectorizable_call, but for SLP
3563 it needs to be deferred until end of vect_schedule_slp, because multiple
3564 SLP instances may refer to the same scalar stmt. */
3566 static void
3567 vect_remove_slp_scalar_calls (slp_tree node)
3569 gimple *stmt, *new_stmt;
3570 gimple_stmt_iterator gsi;
3571 int i;
3572 slp_tree child;
3573 tree lhs;
3574 stmt_vec_info stmt_info;
3576 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
3577 return;
3579 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3580 vect_remove_slp_scalar_calls (child);
3582 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3584 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3585 continue;
3586 stmt_info = vinfo_for_stmt (stmt);
3587 if (stmt_info == NULL
3588 || is_pattern_stmt_p (stmt_info)
3589 || !PURE_SLP_STMT (stmt_info))
3590 continue;
3591 lhs = gimple_call_lhs (stmt);
3592 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3593 set_vinfo_for_stmt (new_stmt, stmt_info);
3594 set_vinfo_for_stmt (stmt, NULL);
3595 STMT_VINFO_STMT (stmt_info) = new_stmt;
3596 gsi = gsi_for_stmt (stmt);
3597 gsi_replace (&gsi, new_stmt, false);
3598 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3602 /* Generate vector code for all SLP instances in the loop/basic block. */
3604 bool
3605 vect_schedule_slp (vec_info *vinfo)
3607 vec<slp_instance> slp_instances;
3608 slp_instance instance;
3609 unsigned int i, vf;
3610 bool is_store = false;
3612 slp_instances = vinfo->slp_instances;
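/* For loop SLP the loop's vectorization factor applies; basic-block
   SLP is never unrolled, so it uses VF 1. */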
3613 if (is_a <loop_vec_info> (vinfo))
3614 vf = as_a <loop_vec_info> (vinfo)->vectorization_factor;
3615 else
3616 vf = 1;
3618 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3620 /* Schedule the tree of INSTANCE. */
3621 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3622 instance, vf);
3623 if (dump_enabled_p ())
3624 dump_printf_loc (MSG_NOTE, vect_location,
3625 "vectorizing stmts using SLP.\n");
3628 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3630 slp_tree root = SLP_INSTANCE_TREE (instance);
3631 gimple *store;
3632 unsigned int j;
3633 gimple_stmt_iterator gsi;
3635 /* Remove scalar call stmts. Do not do this for basic-block
3636 vectorization as not all uses may be vectorized.
3637 ??? Why should this be necessary? DCE should be able to
3638 remove the stmts itself.
3639 ??? For BB vectorization we can as well remove scalar
3640 stmts starting from the SLP tree root if they have no
3641 uses. */
3642 if (is_a <loop_vec_info> (vinfo))
3643 vect_remove_slp_scalar_calls (root);
3645 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3646 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3648 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3649 break;
3651 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3652 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3653 /* Free the attached stmt_vec_info and remove the stmt. */
3654 gsi = gsi_for_stmt (store);
3655 unlink_stmt_vdef (store);
3656 gsi_remove (&gsi, true);
3657 release_defs (store);
3658 free_stmt_vec_info (store);
3662 return is_store;