/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "params.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"
#include "dbgcnt.h"
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple *> scalar_stmts)
{
  slp_tree node;
  gimple *stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;
  SLP_TREE_DEF_TYPE (node) = vect_internal_def;

  return node;
}
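
/* For example, for the COND_EXPR assignment
     x_1 = a_2 < b_3 ? c_4 : d_5;
   gimple_num_ops () - 1 yields three rhs operands and NOPS is bumped
   to four, because the embedded comparison contributes two operands
   (a_2 and b_3) that are analyzed separately when the operand info is
   built below.  */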
/* This structure is used in creation of an SLP tree.  Each instance
   corresponds to the same operand in a group of scalar stmts in an SLP
   node.  */
typedef struct _slp_oprnd_info
{
  /* Def-stmts for the operands.  */
  vec<gimple *> def_stmts;
  /* Information about the first statement, its vector def-type, type, the
     operand itself in case it's constant, and an indication if it's a pattern
     stmt.  */
  enum vect_def_type first_dt;
  tree first_op_type;
  bool first_pattern;
  bool second_pattern;
} *slp_oprnd_info;
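
/* For instance, for the two-stmt group
     x_1 = a_1 + b_1;
     x_2 = a_2 + b_2;
   two slp_oprnd_info entries are created: the first collects the def
   stmts of a_1 and a_2, the second those of b_1 and b_2.  */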
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info> 
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)
{
  gimple *next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
	return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
	result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
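
/* For example, in a chain accessing a[0], a[2] and a[3] (the element
   for a[2] carrying a GROUP_GAP of 2, the one for a[3] a GROUP_GAP
   of 1), the places returned for the three data-refs are 0, 2 and 3;
   a stmt outside the chain yields -1.  */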
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */

static int 
vect_get_and_check_slp_defs (vec_info *vinfo,
			     gimple *stmt, unsigned stmt_num,
			     vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  gimple *def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR
	  && COMPARISON_CLASS_P (gimple_assign_rhs1 (stmt)))
	{
	  first_op_cond = true;
	  commutative = true;
	  number_of_oprnds++;
	}
      else
	commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
	{
	  if (i == 0 || i == 1)
	    oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
				  swapped ? !i : i);
	  else
	    oprnd = gimple_op (stmt, first_op_idx + i - 1);
	}
      else
	oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: can't analyze def for ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }

	  return -1;
	}

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
	 from the pattern.  Check that all the stmts of the node are in the
	 pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
	  && vect_stmt_in_region_p (vinfo, def_stmt)
	  && vinfo_for_stmt (def_stmt)
	  && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
	{
	  pattern = true;
	  if (!first && !oprnd_info->first_pattern
	      /* Allow different pattern state for the defs of the
		 first stmt in reduction chains.  */
	      && (oprnd_info->first_dt != vect_reduction_def
		  || (!second && !oprnd_info->second_pattern)))
	    {
	      if (i == 0
		  && !swapped
		  && commutative)
		{
		  swapped = true;
		  goto again;
		}

	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: some of the stmts"
				   " are in a pattern, and others are not ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}

	      return 1;
	    }

	  def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
	  dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

	  if (dt == vect_unknown_def_type)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Unsupported pattern.\n");
	      return -1;
	    }

	  switch (gimple_code (def_stmt))
	    {
	    case GIMPLE_PHI:
	    case GIMPLE_ASSIGN:
	      break;

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported defining stmt:\n");
	      return -1;
	    }
	}

      if (second)
	oprnd_info->second_pattern = pattern;

      if (first)
	{
	  oprnd_info->first_dt = dt;
	  oprnd_info->first_pattern = pattern;
	  oprnd_info->first_op_type = TREE_TYPE (oprnd);
	}
      else
	{
	  /* Not first stmt of the group, check that the def-stmt/s match
	     the def-stmt/s of the first stmt.  Allow different definition
	     types for reduction chains: the first stmt must be a
	     vect_reduction_def (a phi node), and the rest
	     vect_internal_def.  */
	  if (((oprnd_info->first_dt != dt
		&& !(oprnd_info->first_dt == vect_reduction_def
		     && dt == vect_internal_def)
		&& !((oprnd_info->first_dt == vect_external_def
		      || oprnd_info->first_dt == vect_constant_def)
		     && (dt == vect_external_def
			 || dt == vect_constant_def)))
	       || !types_compatible_p (oprnd_info->first_op_type,
				       TREE_TYPE (oprnd))))
	    {
	      /* Try swapping operands if we got a mismatch.  */
	      if (i == 0
		  && !swapped
		  && commutative)
		{
		  swapped = true;
		  goto again;
		}

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Build SLP failed: different types\n");

	      return 1;
	    }
	}

      /* Check the types of the definitions.  */
      switch (dt)
	{
	case vect_constant_def:
	case vect_external_def:
	case vect_reduction_def:
	  break;

	case vect_internal_def:
	  oprnd_info->def_stmts.quick_push (def_stmt);
	  break;

	default:
	  /* FORNOW: Not supported.  */
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: illegal type of def ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }

	  return -1;
	}
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
	{
	  tree cond = gimple_assign_rhs1 (stmt);
	  swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
			     &TREE_OPERAND (cond, 1));
	  TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
	}
      else
	swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
			   gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
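
/* For example, in the group
     x_1 = a_1 + 5;
     x_2 = 5 + a_2;
   operand zero of the second stmt is a constant while the first stmt
   supplied an internal def.  Since PLUS_EXPR is commutative the
   analysis retries with the operands of the second stmt swapped,
   succeeds, and finally swaps them in the GIMPLE IL as well.  */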
/* Verify if the scalar stmts STMTS are isomorphic, require data
   permutation or are of unsupported types of operation.  Return
   true if they are, otherwise return false and indicate in *MATCHES
   which stmts are not isomorphic to the first one.  If MATCHES[0]
   is false then this indicates the comparison could not be
   carried out or the stmts will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (vec_info *vinfo,
		       vec<gimple *> stmts, unsigned int group_size,
		       unsigned nops, unsigned int *max_nunits,
		       bool *matches, bool *two_operators)
{
  unsigned int i;
  gimple *first_stmt = stmts[0], *stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype = NULL_TREE, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  HOST_WIDE_INT dummy;
  gimple *first_load = NULL, *prev_first_load = NULL;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unvectorizable statement ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: not GIMPLE_ASSIGN nor "
			       "GIMPLE_CALL ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      /* If populating the vector type requires unrolling then fail
	 before adjusting *max_nunits for basic-block vectorization.  */
      if (is_a <bb_vec_info> (vinfo)
	  && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Build SLP failed: unrolling required "
			   "in basic block SLP\n");
	  /* Fatal mismatch.  */
	  matches[0] = false;
	  return false;
	}

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
	*max_nunits = TYPE_VECTOR_SUBPARTS (vectype);

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
	{
	  rhs_code = CALL_EXPR;
	  if (gimple_call_internal_p (call_stmt)
	      || gimple_call_tail_p (call_stmt)
	      || gimple_call_noreturn_p (call_stmt)
	      || !gimple_call_nothrow_p (call_stmt)
	      || gimple_call_chain (call_stmt))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported call type ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				    call_stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }
	}
      else
	rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
	{
	  first_stmt_code = rhs_code;

	  /* Shift arguments should be equal in all the packed stmts for a
	     vector shift with scalar shift operand.  */
	  if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
	      || rhs_code == LROTATE_EXPR
	      || rhs_code == RROTATE_EXPR)
	    {
	      vec_mode = TYPE_MODE (vectype);

	      /* First see if we have a vector/vector shift.  */
	      optab = optab_for_tree_code (rhs_code, vectype,
					   optab_vector);

	      if (!optab
		  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
		{
		  /* No vector/vector shift, try for a vector/scalar shift.  */
		  optab = optab_for_tree_code (rhs_code, vectype,
					       optab_scalar);

		  if (!optab)
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "Build SLP failed: no optab.\n");
		      /* Fatal mismatch.  */
		      matches[0] = false;
		      return false;
		    }
		  icode = (int) optab_handler (optab, vec_mode);
		  if (icode == CODE_FOR_nothing)
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "Build SLP failed: "
					 "op not supported by target.\n");
		      /* Fatal mismatch.  */
		      matches[0] = false;
		      return false;
		    }
		  optab_op2_mode = insn_data[icode].operand[2].mode;
		  if (!VECTOR_MODE_P (optab_op2_mode))
		    {
		      need_same_oprnds = true;
		      first_op1 = gimple_assign_rhs2 (stmt);
		    }
		}
	    }
	  else if (rhs_code == WIDEN_LSHIFT_EXPR)
	    {
	      need_same_oprnds = true;
	      first_op1 = gimple_assign_rhs2 (stmt);
	    }
	}
      else
	{
	  if (first_stmt_code != rhs_code
	      && alt_stmt_code == ERROR_MARK)
	    alt_stmt_code = rhs_code;
	  if (first_stmt_code != rhs_code
	      && (first_stmt_code != IMAGPART_EXPR
		  || rhs_code != REALPART_EXPR)
	      && (first_stmt_code != REALPART_EXPR
		  || rhs_code != IMAGPART_EXPR)
	      /* Handle mismatches in plus/minus by computing both
		 and merging the results.  */
	      && !((first_stmt_code == PLUS_EXPR
		    || first_stmt_code == MINUS_EXPR)
		   && (alt_stmt_code == PLUS_EXPR
		       || alt_stmt_code == MINUS_EXPR)
		   && rhs_code == alt_stmt_code)
	      && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
		   && (first_stmt_code == ARRAY_REF
		       || first_stmt_code == BIT_FIELD_REF
		       || first_stmt_code == INDIRECT_REF
		       || first_stmt_code == COMPONENT_REF
		       || first_stmt_code == MEM_REF)))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different operation "
				   "in stmt ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "original stmt ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				    first_stmt, 0);
		}
	      /* Mismatch.  */
	      continue;
	    }

	  if (need_same_oprnds
	      && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different shift "
				   "arguments in ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Mismatch.  */
	      continue;
	    }

	  if (rhs_code == CALL_EXPR)
	    {
	      gimple *first_stmt = stmts[0];
	      if (gimple_call_num_args (stmt) != nops
		  || !operand_equal_p (gimple_call_fn (first_stmt),
				       gimple_call_fn (stmt), 0)
		  || gimple_call_fntype (first_stmt)
		     != gimple_call_fntype (stmt))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different calls in ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Mismatch.  */
		  continue;
		}
	    }
	}

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
	{
	  if (REFERENCE_CLASS_P (lhs))
	    {
	      /* Store.  */
	      ;
	    }
	  else
	    {
	      /* Load.  */
	      first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
	      if (prev_first_load)
		{
		  /* Check that there are no loads from different interleaving
		     chains in the same node.  */
		  if (prev_first_load != first_load)
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					   vect_location,
					   "Build SLP failed: different "
					   "interleaving chains in one node ");
			  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					    stmt, 0);
			  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
			}
		      /* Mismatch.  */
		      continue;
		    }
		}
	      else
		prev_first_load = first_load;
	    }
	} /* Grouped access.  */
      else
	{
	  if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
	    {
	      /* Not grouped load.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: not grouped load ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}

	      /* FORNOW: Not grouped loads are not supported.  */
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }

	  /* Not memory operation.  */
	  if (TREE_CODE_CLASS (rhs_code) != tcc_binary
	      && TREE_CODE_CLASS (rhs_code) != tcc_unary
	      && TREE_CODE_CLASS (rhs_code) != tcc_expression
	      && TREE_CODE_CLASS (rhs_code) != tcc_comparison
	      && rhs_code != CALL_EXPR)
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: operation");
		  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      /* Fatal mismatch.  */
	      matches[0] = false;
	      return false;
	    }

	  if (rhs_code == COND_EXPR)
	    {
	      tree cond_expr = gimple_assign_rhs1 (stmt);

	      if (i == 0)
		first_cond_code = TREE_CODE (cond_expr);
	      else if (first_cond_code != TREE_CODE (cond_expr))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different"
				       " operation");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
					stmt, 0);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		    }
		  /* Mismatch.  */
		  continue;
		}
	    }
	}

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
	= XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
	{
	  sel[i] = i;
	  if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
	    sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
	}
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
	{
	  for (i = 0; i < group_size; ++i)
	    if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
	      {
		matches[i] = false;
		if (dump_enabled_p ())
		  {
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Build SLP failed: different operation "
				     "in stmt ");
		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				      stmts[i], 0);
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "original stmt ");
		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				      first_stmt, 0);
		  }
	      }
	  return false;
	}
      *two_operators = true;
    }

  return true;
}
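
/* A typical two-operators group is the addsub pattern
     a[0] = b[0] + c[0];
     a[1] = b[1] - c[1];
     a[2] = b[2] + c[2];
     a[3] = b[3] - c[3];
   with first_stmt_code PLUS_EXPR and alt_stmt_code MINUS_EXPR.  Both
   operations are computed on whole vectors and the mask built above
   ({0, 5, 2, 7} for V4SI) selects the lanes of the plus result where
   the scalar stmt is a PLUS and the lanes of the minus result where
   it is a MINUS.  */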
/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise,
   return TRUE.  */

static bool
vect_build_slp_tree (vec_info *vinfo,
		     slp_tree *node, unsigned int group_size,
		     unsigned int *max_nunits,
		     vec<slp_tree> *loads,
		     bool *matches, unsigned *npermutes, unsigned *tree_size,
		     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple *stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (vinfo,
			      SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
			      max_nunits, matches, &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (vinfo, stmt, i, &oprnds_info))
	{
	case 0:
	  break;
	case -1:
	  matches[0] = false;
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	case 1:
	  matches[i] = false;
	  break;
	}
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
	vect_free_oprnd_info (oprnds_info);
	return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
	continue;

      if (++this_tree_size > max_tree_size)
	{
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	}

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
	{
	  vect_free_oprnd_info (oprnds_info);
	  return false;
	}

      if (vect_build_slp_tree (vinfo, &child,
			       group_size, max_nunits, loads, matches,
			       npermutes, &this_tree_size, max_tree_size))
	{
	  /* If we have all children of child built up from scalars then just
	     throw that away and build it up this node from scalars.  */
	  if (!SLP_TREE_CHILDREN (child).is_empty ())
	    {
	      unsigned int j;
	      slp_tree grandchild;

	      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)
		  break;
	      if (!grandchild)
		{
		  /* Roll back.  */
		  *max_nunits = old_max_nunits;
		  loads->truncate (old_nloads);
		  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		    vect_free_slp_tree (grandchild);
		  SLP_TREE_CHILDREN (child).truncate (0);

		  dump_printf_loc (MSG_NOTE, vect_location,
				   "Building parent vector operands from "
				   "scalars instead\n");
		  oprnd_info->def_stmts = vNULL;
		  SLP_TREE_DEF_TYPE (child) = vect_external_def;
		  SLP_TREE_CHILDREN (*node).quick_push (child);
		  continue;
		}
	    }

	  oprnd_info->def_stmts = vNULL;
	  SLP_TREE_CHILDREN (*node).quick_push (child);
	  continue;
	}

      /* If the SLP build failed fatally and we analyze a basic-block
	 simply treat nodes we fail to build as externally defined
	 (and thus build vectors from the scalar defs).
	 The cost model will reject outright expensive cases.
	 ???  This doesn't treat cases where permutation ultimately
	 fails (or we don't try permutation below).  Ideally we'd
	 even compute a permutation that will end up with the maximum
	 SLP tree size...  */
      if (is_a <bb_vec_info> (vinfo)
	  && !matches[0]
	  /* ???  Rejecting patterns this way doesn't work.  We'd have to
	     do extra work to cancel the pattern so the uses see the
	     scalar version.  */
	  && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
	{
	  unsigned int j;
	  slp_tree grandchild;

	  /* Roll back.  */
	  *max_nunits = old_max_nunits;
	  loads->truncate (old_nloads);
	  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
	    vect_free_slp_tree (grandchild);
	  SLP_TREE_CHILDREN (child).truncate (0);

	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Building vector operands from scalars\n");
	  oprnd_info->def_stmts = vNULL;
	  SLP_TREE_DEF_TYPE (child) = vect_external_def;
	  SLP_TREE_CHILDREN (*node).quick_push (child);
	  continue;
	}

      /* If the SLP build for operand zero failed and operand zero
	 and one can be commutated try that for the scalar stmts
	 that failed the match.  */
      if (i == 0
	  /* A first scalar stmt mismatch signals a fatal mismatch.  */
	  && matches[0]
	  /* ???  For COND_EXPRs we can swap the comparison operands
	     as well as the arms under some constraints.  */
	  && nops == 2
	  && oprnds_info[1]->first_dt == vect_internal_def
	  && is_gimple_assign (stmt)
	  && commutative_tree_code (gimple_assign_rhs_code (stmt))
	  && !SLP_TREE_TWO_OPERATORS (*node)
	  /* Do so only if the number of not successful permutes was no more
	     than a cut-off as re-trying the recursive match on
	     possibly each level of the tree would expose exponential
	     behavior.  */
	  && *npermutes < 4)
	{
	  unsigned int j;
	  slp_tree grandchild;

	  /* Roll back.  */
	  *max_nunits = old_max_nunits;
	  loads->truncate (old_nloads);
	  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
	    vect_free_slp_tree (grandchild);
	  SLP_TREE_CHILDREN (child).truncate (0);

	  /* Swap mismatched definition stmts.  */
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Re-trying with swapped operands of stmts ");
	  for (j = 0; j < group_size; ++j)
	    if (!matches[j])
	      {
		std::swap (oprnds_info[0]->def_stmts[j],
			   oprnds_info[1]->def_stmts[j]);
		dump_printf (MSG_NOTE, "%d ", j);
	      }
	  dump_printf (MSG_NOTE, "\n");
	  /* And try again with scratch 'matches' ... */
	  bool *tem = XALLOCAVEC (bool, group_size);
	  if (vect_build_slp_tree (vinfo, &child,
				   group_size, max_nunits, loads,
				   tem, npermutes, &this_tree_size,
				   max_tree_size))
	    {
	      /* ... so if successful we can apply the operand swapping
		 to the GIMPLE IL.  This is necessary because for example
		 vect_get_slp_defs uses operand indexes and thus expects
		 canonical operand order.  This is also necessary even
		 if we end up building the operand from scalars as
		 we'll continue to process swapped operand two.  */
	      for (j = 0; j < group_size; ++j)
		{
		  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
		  gimple_set_plf (stmt, GF_PLF_1, false);
		}
	      for (j = 0; j < group_size; ++j)
		{
		  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
		  if (!matches[j])
		    {
		      /* Avoid swapping operands twice.  */
		      if (gimple_plf (stmt, GF_PLF_1))
			continue;
		      swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
					 gimple_assign_rhs2_ptr (stmt));
		      gimple_set_plf (stmt, GF_PLF_1, true);
		    }
		}
	      /* Verify we swap all duplicates or none.  */
	      if (flag_checking)
		for (j = 0; j < group_size; ++j)
		  {
		    gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
		    gcc_assert (gimple_plf (stmt, GF_PLF_1) == ! matches[j]);
		  }

	      /* If we have all children of child built up from scalars then
		 just throw that away and build it up this node from scalars.  */
	      if (!SLP_TREE_CHILDREN (child).is_empty ())
		{
		  unsigned int j;
		  slp_tree grandchild;

		  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		    if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)
		      break;
		  if (!grandchild)
		    {
		      /* Roll back.  */
		      *max_nunits = old_max_nunits;
		      loads->truncate (old_nloads);
		      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
			vect_free_slp_tree (grandchild);
		      SLP_TREE_CHILDREN (child).truncate (0);

		      dump_printf_loc (MSG_NOTE, vect_location,
				       "Building parent vector operands from "
				       "scalars instead\n");
		      oprnd_info->def_stmts = vNULL;
		      SLP_TREE_DEF_TYPE (child) = vect_external_def;
		      SLP_TREE_CHILDREN (*node).quick_push (child);
		      continue;
		    }
		}

	      oprnd_info->def_stmts = vNULL;
	      SLP_TREE_CHILDREN (*node).quick_push (child);
	      continue;
	    }

	  ++*npermutes;
	}

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, location_t loc, slp_tree node)
{
  int i;
  gimple *stmt;
  slp_tree child;

  dump_printf_loc (dump_kind, loc, "node%s\n",
		   SLP_TREE_DEF_TYPE (node) != vect_internal_def
		   ? " (external)" : "");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf_loc (dump_kind, loc, "\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, loc, child);
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple *stmt;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple *stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
		  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
			  vec<unsigned> permutation)
{
  gimple *stmt;
  vec<gimple *> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
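
/* For instance, with PERMUTATION {2, 0, 1} the stmt at index 0 moves
   to slot 2, index 1 to slot 0 and index 2 to slot 1:
   tmp_stmts[permutation[i]] receives the stmt currently at index i.  */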
/* Attempt to reorder stmts in a reduction chain so that we don't
   require any load permutation.  Return true if that was possible,
   otherwise return false.  */

static bool
vect_attempt_slp_rearrange_stmts (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j;
  sbitmap load_index;
  unsigned int lidx;
  slp_tree node, load;

  /* Compare all the permutation sequences to the first one.  We know
     that at least one load is permuted.  */
  node = SLP_INSTANCE_LOADS (slp_instn)[0];
  if (!node->load_permutation.exists ())
    return false;
  for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
    {
      if (!load->load_permutation.exists ())
	return false;
      FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
	if (lidx != node->load_permutation[j])
	  return false;
    }

  /* Check that the loads in the first sequence are different and there
     are no gaps between them.  */
  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
    {
      if (lidx >= group_size)
	return false;
      if (bitmap_bit_p (load_index, lidx))
	{
	  sbitmap_free (load_index);
	  return false;
	}
      bitmap_set_bit (load_index, lidx);
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
	sbitmap_free (load_index);
	return false;
      }
  sbitmap_free (load_index);

  /* This permutation is valid for reduction.  Since the order of the
     statements in the nodes is not important unless they are memory
     accesses, we can rearrange the statements in all the nodes
     according to the order of the loads.  */
  vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
			    node->load_permutation);

  /* We are done, no actual permutations need to be generated.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    SLP_TREE_LOAD_PERMUTATION (node).release ();
  return true;
}
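
/* E.g. a load permutation {1, 0, 3, 2} shared by all load nodes of a
   reduction covers each of the four group slots exactly once, so the
   whole instance can simply be reordered and the permutations dropped;
   a sequence like {0, 0, 1, 2} would be rejected by the bitmap checks
   above because slot 0 is used twice and slot 3 not at all.  */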
/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  slp_tree node;
  gimple *stmt, *load, *next_load;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	if (node->load_permutation.exists ())
	  FOR_EACH_VEC_ELT (node->load_permutation, j, next)
	    dump_printf (MSG_NOTE, "%d ", next);
	else
	  for (k = 0; k < group_size; ++k)
	    dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this? */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is not important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (vect_attempt_slp_rearrange_stmts (slp_instn))
	return true;

      /* Fallthru to general load permutation handling.  */
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
	 no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	{
	  if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
	    continue;
	  bool subchain_p = true;
	  next_load = NULL;
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
	    {
	      if (j != 0
		  && (next_load != load
		      || GROUP_GAP (vinfo_for_stmt (load)) != 1))
		{
		  subchain_p = false;
		  break;
		}
	      next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
	    }
	  if (subchain_p)
	    SLP_TREE_LOAD_PERMUTATION (node).release ();
	  else
	    {
	      /* Verify the permutation can be generated.  */
	      vec<tree> tem;
	      if (!vect_transform_slp_perm_load (node, tem, NULL,
						 1, slp_instn, true))
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
				   vect_location,
				   "unsupported load permutation\n");
		  return false;
		}
	    }
	}
      return true;
    }

  /* For loop vectorization verify we can generate the permutation.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
	&& !vect_transform_slp_perm_load
	      (node, vNULL, NULL,
	       SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;

  return true;
}
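
/* In the basic-block case a node loading a[1], a[2] from a chain that
   starts at a[0] forms a subchain (consecutive elements, GROUP_GAP of
   1 each) and its permutation is simply dropped; a node loading a[2],
   a[1] keeps its permutation and is only accepted if the target can
   generate the corresponding permute.  */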
/* Find the last scalar stmt in the SLP node NODE; for pattern stmts
   the location of the original stmt is used.  */

gimple *
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple *last = NULL, *stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
	last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
	last = get_later_stmt (stmt, last);
    }

  return last;
}
/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
			 stmt_vector_for_cost *prologue_cost_vec,
			 stmt_vector_for_cost *body_cost_vec,
			 unsigned ncopies_for_cost)
{
  unsigned i, j;
  slp_tree child;
  gimple *stmt;
  stmt_vec_info stmt_info;
  tree lhs;

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
      vect_analyze_slp_cost_1 (instance, child, prologue_cost_vec,
			       body_cost_vec, ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
	vect_model_store_cost (stmt_info, ncopies_for_cost, false,
			       vect_uninitialized_def,
			       node, prologue_cost_vec, body_cost_vec);
      else
	{
	  gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
	  if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
	    {
	      /* If the load is permuted then the alignment is determined by
		 the first group element not by the first scalar stmt DR.  */
	      stmt = GROUP_FIRST_ELEMENT (stmt_info);
	      stmt_info = vinfo_for_stmt (stmt);
	      /* Record the cost for the permutation.  */
	      record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
				stmt_info, 0, vect_body);
	      /* And adjust the number of loads performed.  */
	      unsigned nunits
		= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
	      ncopies_for_cost
		= (GROUP_SIZE (stmt_info) - GROUP_GAP (stmt_info)
		   + nunits - 1) / nunits;
	      ncopies_for_cost *= SLP_INSTANCE_UNROLLING_FACTOR (instance);
	    }
	  /* Record the cost for the vector loads.  */
	  vect_model_load_cost (stmt_info, ncopies_for_cost, false,
				node, prologue_cost_vec, body_cost_vec);

	  return;
	}
    }

  record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
		    stmt_info, 0, vect_body);
  if (SLP_TREE_TWO_OPERATORS (node))
    {
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
			stmt_info, 0, vect_body);
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
			stmt_info, 0, vect_body);
    }

  /* Push SLP node def-type to stmts.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree op = gimple_op (stmt, i);
      gimple *def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
	continue;
      if (vect_is_simple_use (op, stmt_info->vinfo, &def_stmt, &dt))
	{
	  /* Without looking at the actual initializer a vector of
	     constants can be implemented as load from the constant pool.
	     ???  We need to pass down stmt_info for a vector type
	     even if it points to the wrong stmt.  */
	  if (dt == vect_constant_def)
	    record_stmt_cost (prologue_cost_vec, 1, vector_load,
			      stmt_info, 0, vect_prologue);
	  else if (dt == vect_external_def)
	    record_stmt_cost (prologue_cost_vec, 1, vec_construct,
			      stmt_info, 0, vect_prologue);
	}
    }

  /* Restore stmt def-types.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;
}
/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (slp_instance instance, void *data)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_analyze_slp_cost ===\n");

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  slp_tree node = SLP_INSTANCE_TREE (instance);
  stmt_vec_info stmt_info = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
  /* Adjust the group_size by the vectorization factor which is always one
     for basic-block vectorization.  */
  if (STMT_VINFO_LOOP_VINFO (stmt_info))
    group_size *= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  /* For reductions look at a reduction operand in case the reduction
     operation is widening like DOT_PROD or SAD.  */
  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      switch (gimple_assign_rhs_code (stmt))
	{
	case DOT_PROD_EXPR:
	case SAD_EXPR:
	  nunits = TYPE_VECTOR_SUBPARTS (get_vectype_for_scalar_type
				(TREE_TYPE (gimple_assign_rhs1 (stmt))));
	  break;
	default:;
	}
    }
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  vect_analyze_slp_cost_1 (instance, SLP_INSTANCE_TREE (instance),
			   &prologue_cost_vec, &body_cost_vec,
			   ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  */
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
			    si->misalign, vect_prologue);
    }

  /* Record the instance's instructions in the target cost model.  */
  FOR_EACH_VEC_ELT (body_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
			    si->misalign, vect_body);
    }

  prologue_cost_vec.release ();
  body_cost_vec.release ();
}
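
/* Example of the ncopies computation above: a store group of size 8
   with V4SI vectors (nunits == 4) gives
     least_common_multiple (4, 8) / 4 == 2
   vector stmts per scalar group; in a loop the group size is first
   scaled by the vectorization factor.  */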
/* Splits a group of stores, currently beginning at FIRST_STMT, into two groups:
   one (still beginning at FIRST_STMT) of size GROUP1_SIZE (also containing
   the first GROUP1_SIZE stmts, since stores are consecutive), the second
   containing the remainder.
   Return the first stmt in the second group.  */

static gimple *
vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
{
  stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
  gcc_assert (GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
  gcc_assert (group1_size > 0);
  int group2_size = GROUP_SIZE (first_vinfo) - group1_size;
  gcc_assert (group2_size > 0);
  GROUP_SIZE (first_vinfo) = group1_size;

  gimple *stmt = first_stmt;
  for (unsigned i = group1_size; i > 1; i--)
    {
      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
    }
  /* STMT is now the last element of the first group.  */
  gimple *group2 = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
  GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;

  GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
  for (stmt = group2; stmt; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
    {
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
    }

  /* For the second group, the GROUP_GAP is that before the original group,
     plus skipping over the first vector.  */
  GROUP_GAP (vinfo_for_stmt (group2)) =
    GROUP_GAP (first_vinfo) + group1_size;

  /* GROUP_GAP of the first group now has to skip over the second group too.  */
  GROUP_GAP (first_vinfo) += group2_size;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
		     group1_size, group2_size);

  return group2;
}
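
/* For example, splitting a 16-stmt store group at GROUP1_SIZE == 8
   yields two groups of 8: the second group's GROUP_GAP becomes the gap
   before the original group plus the 8 stmts of the first group, and
   the first group's GROUP_GAP is bumped by 8, so together both groups
   still cover the original extent.  */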
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (vec_info *vinfo,
			   gimple *stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple *next;
  unsigned int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple *> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
	{
	  scalar_type = TREE_TYPE (DR_REF (dr));
	  vectype = get_vectype_for_scalar_type (scalar_type);
	}
      else
	{
	  gcc_assert (is_a <loop_vec_info> (vinfo));
	  vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
	}

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (is_a <loop_vec_info> (vinfo));
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = as_a <loop_vec_info> (vinfo)->reductions.length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Build SLP failed: unsupported data-type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
  if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Build SLP failed: unrolling required in basic"
			 " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
	{
	  if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
	      && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
	    scalar_stmts.safe_push (
		  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
	  else
	    scalar_stmts.safe_push (next);
	  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	}
      /* Mark the first element of the reduction chain as reduction to properly
	 transform the node.  In the reduction analysis phase only the last
	 element of the chain is marked as reduction.  */
      if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple *> reductions = as_a <loop_vec_info> (vinfo)->reductions;
      for (i = 0; reductions.iterate (i, &next); i++)
	scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (vinfo, &node, group_size,
			   &max_nunits, &loads,
			   matches, &npermutes, NULL, max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
	unrolling_factor = least_common_multiple (max_nunits, group_size)
			   / group_size;

      if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Build SLP failed: unrolling required in basic"
			     " block SLP\n");
	  vect_free_slp_tree (node);
	  loads.release ();
	  return false;
	}

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
	{
	  vec<unsigned> load_permutation;
	  int j;
	  gimple *load, *first_stmt;
	  bool this_load_permuted = false;
	  load_permutation.create (group_size);
	  first_stmt = GROUP_FIRST_ELEMENT
	      (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
	    {
	      int load_place
		= vect_get_place_in_interleaving_chain (load, first_stmt);
	      gcc_assert (load_place != -1);
	      if (load_place != j)
		this_load_permuted = true;
	      load_permutation.safe_push (load_place);
	    }
	  if (!this_load_permuted
	      /* The load requires permutation when unrolling exposes
		 a gap either because the group is larger than the SLP
		 group-size or because there is a gap between the groups.  */
	      && (unrolling_factor == 1
		  || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
		      && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
	    {
	      load_permutation.release ();
	      continue;
	    }
	  SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
	  loads_permuted = true;
	}

      if (loads_permuted)
	{
	  if (!vect_supported_load_permutation_p (new_instance))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported load "
				   "permutation ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      vect_free_slp_instance (new_instance);
	      return false;
	    }
	}

      /* If the loads and stores can be handled with load/store-lane
	 instructions do not generate this SLP instance.  */
      if (is_a <loop_vec_info> (vinfo)
	  && loads_permuted
	  && dr && vect_store_lanes_supported (vectype, group_size))
	{
	  slp_tree load_node;
	  FOR_EACH_VEC_ELT (loads, i, load_node)
	    {
	      gimple *first_stmt = GROUP_FIRST_ELEMENT
		  (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
	      stmt_vec_info stmt_vinfo = vinfo_for_stmt (first_stmt);
	      /* Use SLP for strided accesses (or if we can't load-lanes).  */
	      if (STMT_VINFO_STRIDED_P (stmt_vinfo)
		  || ! vect_load_lanes_supported
			(STMT_VINFO_VECTYPE (stmt_vinfo),
			 GROUP_SIZE (stmt_vinfo)))
		break;
	    }
	  if (i == loads.length ())
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Built SLP cancelled: can use "
				 "load/store-lanes\n");
	      vect_free_slp_instance (new_instance);
	      return false;
	    }
	}

      vinfo->slp_instances.safe_push (new_instance);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Final SLP tree for instance:\n");
	  vect_print_slp_tree (MSG_NOTE, vect_location, node);
	}

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  /* For basic block SLP, try to break the group up into multiples of the
     vector size.  */
  if (is_a <bb_vec_info> (vinfo)
      && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
      && STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
    {
      /* We consider breaking the group only on VF boundaries from the existing
	 start.  */
      for (i = 0; i < group_size; i++)
	if (!matches[i]) break;

      if (i >= nunits && i < group_size)
	{
	  /* Split into two groups at the first vector boundary before i.  */
	  gcc_assert ((nunits & (nunits - 1)) == 0);
	  unsigned group1_size = i & ~(nunits - 1);

	  gimple *rest = vect_split_slp_store_group (stmt, group1_size);
	  bool res = vect_analyze_slp_instance (vinfo, stmt, max_tree_size);
	  /* If the first non-match was in the middle of a vector,
	     skip the rest of that vector.  */
	  if (group1_size < i)
	    {
	      i = group1_size + nunits;
	      if (i < group_size)
		rest = vect_split_slp_store_group (rest, nunits);
	    }
	  if (i < group_size)
	    res |= vect_analyze_slp_instance (vinfo, rest, max_tree_size);
	  return res;
	}
      /* Even though the first vector did not all match, we might be able to SLP
	 (some) of the remainder.  FORNOW ignore this possibility.  */
    }

  return false;
}
1909 /* Check if there are stmts in the loop can be vectorized using SLP. Build SLP
1910 trees of packed scalar stmts if SLP is possible. */
1912 bool
1913 vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)
1915 unsigned int i;
1916 gimple *first_element;
1917 bool ok = false;
1919 if (dump_enabled_p ())
1920 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
1922 /* Find SLP sequences starting from groups of grouped stores. */
1923 FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
1924 if (vect_analyze_slp_instance (vinfo, first_element, max_tree_size))
1925 ok = true;
1927 if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
1929 if (loop_vinfo->reduction_chains.length () > 0)
1931 /* Find SLP sequences starting from reduction chains. */
1932 FOR_EACH_VEC_ELT (loop_vinfo->reduction_chains, i, first_element)
1933 if (vect_analyze_slp_instance (vinfo, first_element,
1934 max_tree_size))
1935 ok = true;
1936 else
1937 return false;
1939 /* Don't try to vectorize SLP reductions if a reduction chain was
1940 detected. */
1941 return ok;
1944 /* Find SLP sequences starting from groups of reductions. */
1945 if (loop_vinfo->reductions.length () > 1
1946 && vect_analyze_slp_instance (vinfo, loop_vinfo->reductions[0],
1947 max_tree_size))
1948 ok = true;
1951 return true;
1955 /* For each possible SLP instance decide whether to SLP it and calculate overall
1956 unrolling factor needed to SLP the loop. Return TRUE if decided to SLP at
1957 least one instance. */
1959 bool
1960 vect_make_slp_decision (loop_vec_info loop_vinfo)
1962 unsigned int i, unrolling_factor = 1;
1963 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
1964 slp_instance instance;
1965 int decided_to_slp = 0;
1967 if (dump_enabled_p ())
1968 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
1969 "\n");
1971 FOR_EACH_VEC_ELT (slp_instances, i, instance)
1973 /* FORNOW: SLP if you can. */
1974 if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
1975 unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);
1977 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1978 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1979 loop-based vectorization. Such stmts will be marked as HYBRID. */
1980 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
1981 decided_to_slp++;
1984 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;
1986 if (decided_to_slp && dump_enabled_p ())
1987 dump_printf_loc (MSG_NOTE, vect_location,
1988 "Decided to SLP %d instances. Unrolling factor %d\n",
1989 decided_to_slp, unrolling_factor);
1991 return (decided_to_slp > 0);
1995 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1996 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
1998 static void
1999 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
2001 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[i];
2002 imm_use_iterator imm_iter;
2003 gimple *use_stmt;
2004 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
2005 slp_tree child;
2006 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2007 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2008 int j;
2010 /* Propagate hybrid down the SLP tree. */
2011 if (stype == hybrid)
2013 else if (HYBRID_SLP_STMT (stmt_vinfo))
2014 stype = hybrid;
2015 else
2017 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
2018 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
2019 /* We always get the pattern stmt here, but for immediate
2020 uses we have to use the LHS of the original stmt. */
2021 gcc_checking_assert (!STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
2022 if (STMT_VINFO_RELATED_STMT (stmt_vinfo))
2023 stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
2024 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2025 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
2027 if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
2028 continue;
2029 use_vinfo = vinfo_for_stmt (use_stmt);
2030 if (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2031 && STMT_VINFO_RELATED_STMT (use_vinfo))
2032 use_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (use_vinfo));
2033 if (!STMT_SLP_TYPE (use_vinfo)
2034 && (STMT_VINFO_RELEVANT (use_vinfo)
2035 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo)))
2036 && !(gimple_code (use_stmt) == GIMPLE_PHI
2037 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2039 if (dump_enabled_p ())
2041 dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
2042 "def in non-SLP stmt: ");
2043 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0);
2045 stype = hybrid;
2050 if (stype == hybrid
2051 && !HYBRID_SLP_STMT (stmt_vinfo))
2053 if (dump_enabled_p ())
2055 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2056 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
2058 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2061 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2062 if (SLP_TREE_DEF_TYPE (child) != vect_external_def)
2063 vect_detect_hybrid_slp_stmts (child, i, stype);
2066 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2068 static tree
2069 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2071 walk_stmt_info *wi = (walk_stmt_info *)data;
2072 struct loop *loopp = (struct loop *)wi->info;
2074 if (wi->is_lhs)
2075 return NULL_TREE;
2077 if (TREE_CODE (*tp) == SSA_NAME
2078 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2080 gimple *def_stmt = SSA_NAME_DEF_STMT (*tp);
2081 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2082 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2084 if (dump_enabled_p ())
2086 dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
2087 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
2089 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2093 return NULL_TREE;
2096 static tree
2097 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2098 walk_stmt_info *)
2100 /* If the stmt is in a SLP instance then this isn't a reason
2101 to mark use definitions in other SLP instances as hybrid. */
2102 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2103 *handled = true;
2104 return NULL_TREE;
2107 /* Find stmts that must be both vectorized and SLPed. */
2109 void
2110 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2112 unsigned int i;
2113 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2114 slp_instance instance;
2116 if (dump_enabled_p ())
2117 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2118 "\n");
2120 /* First walk all pattern stmts in the loop and mark the defs of their
2121 uses as hybrid, because immediate uses in pattern stmts are not recorded. */
2122 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2124 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2125 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2126 gsi_next (&gsi))
2128 gimple *stmt = gsi_stmt (gsi);
2129 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2130 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2132 walk_stmt_info wi;
2133 memset (&wi, 0, sizeof (wi));
2134 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2135 gimple_stmt_iterator gsi2
2136 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2137 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2138 vect_detect_hybrid_slp_1, &wi);
2139 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2140 vect_detect_hybrid_slp_2,
2141 vect_detect_hybrid_slp_1, &wi);
2146 /* Then walk the SLP instance trees marking stmts with uses in
2147 non-SLP stmts as hybrid, also propagating hybrid down the
2148 SLP tree, collecting the above info on-the-fly. */
2149 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2151 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2152 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2153 i, pure_slp);
2158 /* Create and initialize a new bb_vec_info struct for BB, as well as
2159 stmt_vec_info structs for all the stmts in it. */
2161 static bb_vec_info
2162 new_bb_vec_info (gimple_stmt_iterator region_begin,
2163 gimple_stmt_iterator region_end)
2165 basic_block bb = gsi_bb (region_begin);
2166 bb_vec_info res = NULL;
2167 gimple_stmt_iterator gsi;
2169 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2170 res->kind = vec_info::bb;
2171 BB_VINFO_BB (res) = bb;
2172 res->region_begin = region_begin;
2173 res->region_end = region_end;
2175 for (gsi = region_begin; gsi_stmt (gsi) != gsi_stmt (region_end);
2176 gsi_next (&gsi))
2178 gimple *stmt = gsi_stmt (gsi);
2179 gimple_set_uid (stmt, 0);
2180 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
2183 BB_VINFO_GROUPED_STORES (res).create (10);
2184 BB_VINFO_SLP_INSTANCES (res).create (2);
2185 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2187 bb->aux = res;
2188 return res;
2192 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2193 stmts in the basic block. */
2195 static void
2196 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2198 vec<slp_instance> slp_instances;
2199 slp_instance instance;
2200 basic_block bb;
2201 gimple_stmt_iterator si;
2202 unsigned i;
2204 if (!bb_vinfo)
2205 return;
2207 bb = BB_VINFO_BB (bb_vinfo);
2209 for (si = bb_vinfo->region_begin;
2210 gsi_stmt (si) != gsi_stmt (bb_vinfo->region_end); gsi_next (&si))
2212 gimple *stmt = gsi_stmt (si);
2213 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2215 if (stmt_info)
2216 /* Free stmt_vec_info. */
2217 free_stmt_vec_info (stmt);
2219 /* Reset region marker. */
2220 gimple_set_uid (stmt, -1);
2223 vect_destroy_datarefs (bb_vinfo);
2224 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2225 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2226 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2227 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2228 vect_free_slp_instance (instance);
2229 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2230 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2231 free (bb_vinfo);
2232 bb->aux = NULL;
2236 /* Analyze statements contained in SLP tree node after recursively analyzing
2237 the subtree. Return TRUE if the operations are supported. */
2239 static bool
2240 vect_slp_analyze_node_operations (slp_tree node)
2242 bool dummy;
2243 int i, j;
2244 gimple *stmt;
2245 slp_tree child;
2247 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
2248 return true;
2250 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2251 if (!vect_slp_analyze_node_operations (child))
2252 return false;
2254 bool res = true;
2255 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2257 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2258 gcc_assert (stmt_info);
2259 gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
2261 /* Push SLP node def-type to stmt operands. */
2262 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2263 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
2264 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (child)[i]))
2265 = SLP_TREE_DEF_TYPE (child);
2266 res = vect_analyze_stmt (stmt, &dummy, node);
2267 /* Restore def-types. */
2268 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2269 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
2270 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (child)[i]))
2271 = vect_internal_def;
2272 if (! res)
2273 break;
2276 return res;
2280 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2281 operations are supported. */
2283 bool
2284 vect_slp_analyze_operations (vec<slp_instance> slp_instances, void *data)
2286 slp_instance instance;
2287 int i;
2289 if (dump_enabled_p ())
2290 dump_printf_loc (MSG_NOTE, vect_location,
2291 "=== vect_slp_analyze_operations ===\n");
2293 for (i = 0; slp_instances.iterate (i, &instance); )
2295 if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
2297 dump_printf_loc (MSG_NOTE, vect_location,
2298 "removing SLP instance operations starting from: ");
2299 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2300 SLP_TREE_SCALAR_STMTS
2301 (SLP_INSTANCE_TREE (instance))[0], 0);
2302 vect_free_slp_instance (instance);
2303 slp_instances.ordered_remove (i);
2305 else
2307 /* Compute the costs of the SLP instance. */
2308 vect_analyze_slp_cost (instance, data);
2309 i++;
2313 if (!slp_instances.length ())
2314 return false;
2316 return true;
2320 /* Compute the scalar cost of the SLP node NODE and its children
2321 and return it. Do not account defs that are marked in LIFE and
2322 update LIFE according to uses of NODE. */
2324 static unsigned
2325 vect_bb_slp_scalar_cost (basic_block bb,
2326 slp_tree node, vec<bool, va_heap> *life)
2328 unsigned scalar_cost = 0;
2329 unsigned i;
2330 gimple *stmt;
2331 slp_tree child;
2333 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2335 unsigned stmt_cost;
2336 ssa_op_iter op_iter;
2337 def_operand_p def_p;
2338 stmt_vec_info stmt_info;
2340 if ((*life)[i])
2341 continue;
2343 /* If there is a non-vectorized use of the defs then the scalar
2344 stmt is kept live in which case we do not account it or any
2345 required defs in the SLP children in the scalar cost. This
2346 way we make the vectorization more costly when compared to
2347 the scalar cost. */
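      /* For example (an assumed situation): if an SLP def also feeds a
	 scalar use after the region, the scalar stmt survives vectorization
	 either way, so dropping it from the scalar total means the vector
	 version has to beat a smaller baseline.  */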
2348 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2350 imm_use_iterator use_iter;
2351 gimple *use_stmt;
2352 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2353 if (!is_gimple_debug (use_stmt)
2354 && (! vect_stmt_in_region_p (vinfo_for_stmt (stmt)->vinfo,
2355 use_stmt)
2356 || ! PURE_SLP_STMT (vinfo_for_stmt (use_stmt))))
2358 (*life)[i] = true;
2359 BREAK_FROM_IMM_USE_STMT (use_iter);
2362 if ((*life)[i])
2363 continue;
2365 stmt_info = vinfo_for_stmt (stmt);
2366 if (STMT_VINFO_DATA_REF (stmt_info))
2368 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2369 stmt_cost = vect_get_stmt_cost (scalar_load);
2370 else
2371 stmt_cost = vect_get_stmt_cost (scalar_store);
2373 else
2374 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2376 scalar_cost += stmt_cost;
2379 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2380 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
2381 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2383 return scalar_cost;
2386 /* Check if vectorization of the basic block is profitable. */
2388 static bool
2389 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2391 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2392 slp_instance instance;
2393 int i;
2394 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2395 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2397 /* Calculate scalar cost. */
2398 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2400 auto_vec<bool, 20> life;
2401 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2402 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2403 SLP_INSTANCE_TREE (instance),
2404 &life);
2407 /* Complete the target-specific cost calculation. */
2408 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2409 &vec_inside_cost, &vec_epilogue_cost);
2411 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2413 if (dump_enabled_p ())
2415 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2416 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2417 vec_inside_cost);
2418 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2419 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2420 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2423 /* Vectorization is profitable if its cost does not exceed the cost of the scalar
2424 version. Note that we err on the vector side for equal cost because
2425 the cost estimate is otherwise quite pessimistic (constant uses are
2426 free on the scalar side but cost a load on the vector side for
2427 example). */
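     /* Hypothetical numbers as an illustration: with vec_inside_cost == 10,
	vec_prologue_cost == 2, vec_epilogue_cost == 1 and scalar_cost == 13,
	the test below is 13 > 13, which is false, so the tie goes to the
	vector side and we vectorize.  */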
2428 if (vec_outside_cost + vec_inside_cost > scalar_cost)
2429 return false;
2431 return true;
2434 /* Check if the basic block can be vectorized. Returns a bb_vec_info
2435 if so and sets fatal to true if failure is independent of
2436 current_vector_size. */
2438 static bb_vec_info
2439 vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
2440 gimple_stmt_iterator region_end,
2441 vec<data_reference_p> datarefs, int n_stmts,
2442 bool &fatal)
2444 bb_vec_info bb_vinfo;
2445 slp_instance instance;
2446 int i;
2447 int min_vf = 2;
2449 /* The first group of checks is independent of the vector size. */
2450 fatal = true;
2452 if (n_stmts > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2454 if (dump_enabled_p ())
2455 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2456 "not vectorized: too many instructions in "
2457 "basic block.\n");
2458 free_data_refs (datarefs);
2459 return NULL;
2462 bb_vinfo = new_bb_vec_info (region_begin, region_end);
2463 if (!bb_vinfo)
2464 return NULL;
2466 BB_VINFO_DATAREFS (bb_vinfo) = datarefs;
2468 /* Analyze the data references. */
2470 if (!vect_analyze_data_refs (bb_vinfo, &min_vf))
2472 if (dump_enabled_p ())
2473 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2474 "not vectorized: unhandled data-ref in basic "
2475 "block.\n");
2477 destroy_bb_vec_info (bb_vinfo);
2478 return NULL;
2481 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2483 if (dump_enabled_p ())
2484 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2485 "not vectorized: not enough data-refs in "
2486 "basic block.\n");
2488 destroy_bb_vec_info (bb_vinfo);
2489 return NULL;
2492 if (!vect_analyze_data_ref_accesses (bb_vinfo))
2494 if (dump_enabled_p ())
2495 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2496 "not vectorized: unhandled data access in "
2497 "basic block.\n");
2499 destroy_bb_vec_info (bb_vinfo);
2500 return NULL;
2503 /* If there are no grouped stores in the region there is no need
2504 to continue with pattern recog as vect_analyze_slp will fail
2505 anyway. */
2506 if (bb_vinfo->grouped_stores.is_empty ())
2508 if (dump_enabled_p ())
2509 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2510 "not vectorized: no grouped stores in "
2511 "basic block.\n");
2513 destroy_bb_vec_info (bb_vinfo);
2514 return NULL;
2517 /* The rest of the analysis below depends on the vector size in some way,
2518 so from here on failures are no longer fatal. */
2518 fatal = false;
2520 vect_pattern_recog (bb_vinfo);
2522 /* Check the SLP opportunities in the basic block, analyze and build SLP
2523 trees. */
2524 if (!vect_analyze_slp (bb_vinfo, n_stmts))
2526 if (dump_enabled_p ())
2528 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2529 "Failed to SLP the basic block.\n");
2530 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2531 "not vectorized: failed to find SLP opportunities "
2532 "in basic block.\n");
2535 destroy_bb_vec_info (bb_vinfo);
2536 return NULL;
2539 /* Analyze and verify the alignment of data references and the
2540 dependence in the SLP instances. */
2541 for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); )
2543 if (! vect_slp_analyze_and_verify_instance_alignment (instance)
2544 || ! vect_slp_analyze_instance_dependence (instance))
2546 dump_printf_loc (MSG_NOTE, vect_location,
2547 "removing SLP instance operations starting from: ");
2548 dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
2549 SLP_TREE_SCALAR_STMTS
2550 (SLP_INSTANCE_TREE (instance))[0], 0);
2551 vect_free_slp_instance (instance);
2552 BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
2553 continue;
2556 /* Mark all the statements that we want to vectorize as pure SLP and
2557 relevant. */
2558 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2559 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2561 i++;
2563 if (! BB_VINFO_SLP_INSTANCES (bb_vinfo).length ())
2565 destroy_bb_vec_info (bb_vinfo);
2566 return NULL;
2569 if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo),
2570 BB_VINFO_TARGET_COST_DATA (bb_vinfo)))
2572 if (dump_enabled_p ())
2573 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2574 "not vectorized: bad operation in basic block.\n");
2576 destroy_bb_vec_info (bb_vinfo);
2577 return NULL;
2580 /* Cost model: check if the vectorization is worthwhile. */
2581 if (!unlimited_cost_model (NULL)
2582 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2584 if (dump_enabled_p ())
2585 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2586 "not vectorized: vectorization is not "
2587 "profitable.\n");
2589 destroy_bb_vec_info (bb_vinfo);
2590 return NULL;
2593 if (dump_enabled_p ())
2594 dump_printf_loc (MSG_NOTE, vect_location,
2595 "Basic block will be vectorized using SLP\n");
2597 return bb_vinfo;
2601 /* Main entry for the BB vectorizer. Analyze and transform BB, returns
2602 true if anything in the basic-block was vectorized. */
2604 bool
2605 vect_slp_bb (basic_block bb)
2607 bb_vec_info bb_vinfo;
2608 gimple_stmt_iterator gsi;
2609 unsigned int vector_sizes;
2610 bool any_vectorized = false;
2612 if (dump_enabled_p ())
2613 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2615 /* Autodetect first vector size we try. */
2616 current_vector_size = 0;
2617 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2619 gsi = gsi_start_bb (bb);
2621 while (1)
2623 if (gsi_end_p (gsi))
2624 break;
2626 gimple_stmt_iterator region_begin = gsi;
2627 vec<data_reference_p> datarefs = vNULL;
2628 int insns = 0;
2630 for (; !gsi_end_p (gsi); gsi_next (&gsi))
2632 gimple *stmt = gsi_stmt (gsi);
2633 if (is_gimple_debug (stmt))
2634 continue;
2635 insns++;
2637 if (gimple_location (stmt) != UNKNOWN_LOCATION)
2638 vect_location = gimple_location (stmt);
2640 if (!find_data_references_in_stmt (NULL, stmt, &datarefs))
2641 break;
2644 /* Skip leading unhandled stmts. */
2645 if (gsi_stmt (region_begin) == gsi_stmt (gsi))
2647 gsi_next (&gsi);
2648 continue;
2651 gimple_stmt_iterator region_end = gsi;
2653 bool vectorized = false;
2654 bool fatal = false;
2655 bb_vinfo = vect_slp_analyze_bb_1 (region_begin, region_end,
2656 datarefs, insns, fatal);
2657 if (bb_vinfo
2658 && dbg_cnt (vect_slp))
2660 if (dump_enabled_p ())
2661 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB part\n");
2663 vect_schedule_slp (bb_vinfo);
2665 if (dump_enabled_p ())
2666 dump_printf_loc (MSG_NOTE, vect_location,
2667 "basic block part vectorized\n");
2669 destroy_bb_vec_info (bb_vinfo);
2671 vectorized = true;
2673 else
2674 destroy_bb_vec_info (bb_vinfo);
2676 any_vectorized |= vectorized;
2678 vector_sizes &= ~current_vector_size;
2679 if (vectorized
2680 || vector_sizes == 0
2681 || current_vector_size == 0
2682 /* If vect_slp_analyze_bb_1 signaled that analysis for all
2683 vector sizes will fail, do not bother iterating. */
2684 || fatal)
2686 if (gsi_end_p (region_end))
2687 break;
2689 /* Skip the unhandled stmt. */
2690 gsi_next (&gsi);
2692 /* And reset vector sizes. */
2693 current_vector_size = 0;
2694 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2696 else
2698 /* Try the largest remaining vector size. */
2699 current_vector_size = 1 << floor_log2 (vector_sizes);
2700 if (dump_enabled_p ())
2701 dump_printf_loc (MSG_NOTE, vect_location,
2702 "***** Re-trying analysis with "
2703 "vector size %d\n", current_vector_size);
2705 /* Start over. */
2706 gsi = region_begin;
2710 return any_vectorized;
2714 /* Return true if the vector type of the boolean constant that is the OPNUM
2715 operand in statement STMT is a boolean vector. */
2717 static bool
2718 vect_mask_constant_operand_p (gimple *stmt, int opnum)
2720 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2721 enum tree_code code = gimple_expr_code (stmt);
2722 tree op, vectype;
2723 gimple *def_stmt;
2724 enum vect_def_type dt;
2726 /* For comparisons and COND_EXPR the type is chosen depending
2727 on the other comparison operand. */
2728 if (TREE_CODE_CLASS (code) == tcc_comparison)
2730 if (opnum)
2731 op = gimple_assign_rhs1 (stmt);
2732 else
2733 op = gimple_assign_rhs2 (stmt);
2735 if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
2736 &dt, &vectype))
2737 gcc_unreachable ();
2739 return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
2742 if (code == COND_EXPR)
2744 tree cond = gimple_assign_rhs1 (stmt);
2746 if (TREE_CODE (cond) == SSA_NAME)
2747 return false;
2749 if (opnum)
2750 op = TREE_OPERAND (cond, 1);
2751 else
2752 op = TREE_OPERAND (cond, 0);
2754 if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
2755 &dt, &vectype))
2756 gcc_unreachable ();
2758 return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
2761 return VECTOR_BOOLEAN_TYPE_P (STMT_VINFO_VECTYPE (stmt_vinfo));
2765 /* For constant and loop invariant defs of SLP_NODE this function returns
2766 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2767 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2768 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2769 REDUC_INDEX is the index of the reduction operand in the statements, or -1
2770 if there is no reduction operand. */
2772 static void
2773 vect_get_constant_vectors (tree op, slp_tree slp_node,
2774 vec<tree> *vec_oprnds,
2775 unsigned int op_num, unsigned int number_of_vectors,
2776 int reduc_index)
2778 vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2779 gimple *stmt = stmts[0];
2780 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2781 unsigned nunits;
2782 tree vec_cst;
2783 tree *elts;
2784 unsigned j, number_of_places_left_in_vector;
2785 tree vector_type;
2786 tree vop;
2787 int group_size = stmts.length ();
2788 unsigned int vec_num, i;
2789 unsigned number_of_copies = 1;
2790 vec<tree> voprnds;
2791 voprnds.create (number_of_vectors);
2792 bool constant_p, is_store;
2793 tree neutral_op = NULL;
2794 enum tree_code code = gimple_expr_code (stmt);
2795 gimple *def_stmt;
2796 struct loop *loop;
2797 gimple_seq ctor_seq = NULL;
2799 /* Check if vector type is a boolean vector. */
2800 if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
2801 && vect_mask_constant_operand_p (stmt, op_num))
2802 vector_type
2803 = build_same_sized_truth_vector_type (STMT_VINFO_VECTYPE (stmt_vinfo));
2804 else
2805 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2806 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2808 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2809 && reduc_index != -1)
2811 op_num = reduc_index;
2812 op = gimple_op (stmt, op_num + 1);
2813 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2814 we need either neutral operands or the original operands. See
2815 get_initial_def_for_reduction() for details. */
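      /* As an illustration: for a PLUS_EXPR reduction the neutral element
	 below is 0, so extra vector copies filled with 0 leave the final
	 sum unchanged; for MULT_EXPR it is 1 for the same reason.  */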
2816 switch (code)
2818 case WIDEN_SUM_EXPR:
2819 case DOT_PROD_EXPR:
2820 case SAD_EXPR:
2821 case PLUS_EXPR:
2822 case MINUS_EXPR:
2823 case BIT_IOR_EXPR:
2824 case BIT_XOR_EXPR:
2825 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2826 neutral_op = build_real (TREE_TYPE (op), dconst0);
2827 else
2828 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2830 break;
2832 case MULT_EXPR:
2833 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2834 neutral_op = build_real (TREE_TYPE (op), dconst1);
2835 else
2836 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2838 break;
2840 case BIT_AND_EXPR:
2841 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2842 break;
2844 /* For MIN/MAX we don't have an easy neutral operand but
2845 the initial values can be used fine here. Only for
2846 a reduction chain do we have to force a neutral element. */
2847 case MAX_EXPR:
2848 case MIN_EXPR:
2849 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2850 neutral_op = NULL;
2851 else
2853 def_stmt = SSA_NAME_DEF_STMT (op);
2854 loop = (gimple_bb (stmt))->loop_father;
2855 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2856 loop_preheader_edge (loop));
2858 break;
2860 default:
2861 gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
2862 neutral_op = NULL;
2866 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2868 is_store = true;
2869 op = gimple_assign_rhs1 (stmt);
2871 else
2872 is_store = false;
2874 gcc_assert (op);
2876 if (CONSTANT_CLASS_P (op))
2877 constant_p = true;
2878 else
2879 constant_p = false;
2881 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2882 created vectors. It is greater than 1 if unrolling is performed.
2884 For example, we have two scalar operands, s1 and s2 (e.g., group of
2885 strided accesses of size two), while NUNITS is four (i.e., four scalars
2886 of this type can be packed in a vector). The output vector will contain
2887 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2888 will be 2).
2890 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2891 containing the operands.
2893 For example, NUNITS is four as before, and the group size is 8
2894 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2895 {s5, s6, s7, s8}. */
2897 number_of_copies = nunits * number_of_vectors / group_size;
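  /* Checking the formula against the examples above: with nunits == 4,
     group_size == 2 and one vector to create, 4 * 1 / 2 == 2 copies
     ({s1, s2, s1, s2}); with group_size == 8 and two vectors,
     4 * 2 / 8 == 1 copy per vector.  */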
2899 number_of_places_left_in_vector = nunits;
2900 elts = XALLOCAVEC (tree, nunits);
2901 bool place_after_defs = false;
2902 for (j = 0; j < number_of_copies; j++)
2904 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2906 if (is_store)
2907 op = gimple_assign_rhs1 (stmt);
2908 else
2910 switch (code)
2912 case COND_EXPR:
2914 tree cond = gimple_assign_rhs1 (stmt);
2915 if (TREE_CODE (cond) == SSA_NAME)
2916 op = gimple_op (stmt, op_num + 1);
2917 else if (op_num == 0 || op_num == 1)
2918 op = TREE_OPERAND (cond, op_num);
2919 else
2921 if (op_num == 2)
2922 op = gimple_assign_rhs2 (stmt);
2923 else
2924 op = gimple_assign_rhs3 (stmt);
2927 break;
2929 case CALL_EXPR:
2930 op = gimple_call_arg (stmt, op_num);
2931 break;
2933 case LSHIFT_EXPR:
2934 case RSHIFT_EXPR:
2935 case LROTATE_EXPR:
2936 case RROTATE_EXPR:
2937 op = gimple_op (stmt, op_num + 1);
2938 /* Unlike the other binary operators, shifts/rotates have
2939 the shift count being int, instead of the same type as
2940 the lhs, so make sure the scalar is the right type if
2941 we are dealing with vectors of
2942 long long/long/short/char. */
2943 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2944 op = fold_convert (TREE_TYPE (vector_type), op);
2945 break;
2947 default:
2948 op = gimple_op (stmt, op_num + 1);
2949 break;
2953 if (reduc_index != -1)
2955 loop = (gimple_bb (stmt))->loop_father;
2956 def_stmt = SSA_NAME_DEF_STMT (op);
2958 gcc_assert (loop);
2960 /* Get the def before the loop. In a reduction chain we have only
2961 one initial value. */
2962 if ((j != (number_of_copies - 1)
2963 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2964 && i != 0))
2965 && neutral_op)
2966 op = neutral_op;
2967 else
2968 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2969 loop_preheader_edge (loop));
2972 /* Create 'vect_ = {op0,op1,...,opn}'. */
2973 number_of_places_left_in_vector--;
2974 tree orig_op = op;
2975 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2977 if (CONSTANT_CLASS_P (op))
2979 if (VECTOR_BOOLEAN_TYPE_P (vector_type))
2981 /* Can't use VIEW_CONVERT_EXPR for booleans because
2982 of possibly different sizes of scalar value and
2983 vector element. */
2984 if (integer_zerop (op))
2985 op = build_int_cst (TREE_TYPE (vector_type), 0);
2986 else if (integer_onep (op))
2987 op = build_int_cst (TREE_TYPE (vector_type), 1);
2988 else
2989 gcc_unreachable ();
2991 else
2992 op = fold_unary (VIEW_CONVERT_EXPR,
2993 TREE_TYPE (vector_type), op);
2994 gcc_assert (op && CONSTANT_CLASS_P (op));
2996 else
2998 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2999 gimple *init_stmt;
3000 if (VECTOR_BOOLEAN_TYPE_P (vector_type))
3002 gcc_assert (fold_convertible_p (TREE_TYPE (vector_type),
3003 op));
3004 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, op);
3006 else if (fold_convertible_p (TREE_TYPE (vector_type), op))
3007 init_stmt = gimple_build_assign (new_temp, NOP_EXPR, op);
3008 else
3010 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
3011 op);
3012 init_stmt
3013 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR,
3014 op);
3016 gimple_seq_add_stmt (&ctor_seq, init_stmt);
3017 op = new_temp;
3020 elts[number_of_places_left_in_vector] = op;
3021 if (!CONSTANT_CLASS_P (op))
3022 constant_p = false;
3023 if (TREE_CODE (orig_op) == SSA_NAME
3024 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
3025 && STMT_VINFO_BB_VINFO (stmt_vinfo)
3026 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
3027 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
3028 place_after_defs = true;
3030 if (number_of_places_left_in_vector == 0)
3032 number_of_places_left_in_vector = nunits;
3034 if (constant_p)
3035 vec_cst = build_vector (vector_type, elts);
3036 else
3038 vec<constructor_elt, va_gc> *v;
3039 unsigned k;
3040 vec_alloc (v, nunits);
3041 for (k = 0; k < nunits; ++k)
3042 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
3043 vec_cst = build_constructor (vector_type, v);
3045 tree init;
3046 gimple_stmt_iterator gsi;
3047 if (place_after_defs)
3049 gsi = gsi_for_stmt
3050 (vect_find_last_scalar_stmt_in_slp (slp_node));
3051 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
3053 else
3054 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
3055 if (ctor_seq != NULL)
3057 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
3058 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
3059 GSI_SAME_STMT);
3060 ctor_seq = NULL;
3062 voprnds.quick_push (init);
3063 place_after_defs = false;
3068 /* Since the vectors are created in the reverse order, we should invert
3069 them. */
3070 vec_num = voprnds.length ();
3071 for (j = vec_num; j != 0; j--)
3073 vop = voprnds[j - 1];
3074 vec_oprnds->quick_push (vop);
3077 voprnds.release ();
3079 /* In case VF is greater than the unrolling factor needed for the SLP
3080 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
3081 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
3082 to replicate the vectors. */
3083 while (number_of_vectors > vec_oprnds->length ())
3085 tree neutral_vec = NULL;
3087 if (neutral_op)
3089 if (!neutral_vec)
3090 neutral_vec = build_vector_from_val (vector_type, neutral_op);
3092 vec_oprnds->quick_push (neutral_vec);
3094 else
3096 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
3097 vec_oprnds->quick_push (vop);
3103 /* Get vectorized definitions from SLP_NODE that contains corresponding
3104 vectorized def-stmts. */
3106 static void
3107 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
3109 tree vec_oprnd;
3110 gimple *vec_def_stmt;
3111 unsigned int i;
3113 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
3115 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
3117 gcc_assert (vec_def_stmt);
3118 vec_oprnd = gimple_get_lhs (vec_def_stmt);
3119 vec_oprnds->quick_push (vec_oprnd);
3124 /* Get vectorized definitions for SLP_NODE.
3125 If the scalar definitions are loop invariants or constants, collect them and
3126 call vect_get_constant_vectors() to create vector stmts.
3127 Otherwise, the def-stmts must be already vectorized and the vectorized stmts
3128 must be stored in the corresponding child of SLP_NODE, and we call
3129 vect_get_slp_vect_defs () to retrieve them. */
3131 void
3132 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
3133 vec<vec<tree> > *vec_oprnds, int reduc_index)
3135 gimple *first_stmt;
3136 int number_of_vects = 0, i;
3137 unsigned int child_index = 0;
3138 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
3139 slp_tree child = NULL;
3140 vec<tree> vec_defs;
3141 tree oprnd;
3142 bool vectorized_defs;
3144 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
3145 FOR_EACH_VEC_ELT (ops, i, oprnd)
3147 /* For each operand we check if it has vectorized definitions in a child
3148 node or we need to create them (for invariants and constants). We
3149 check if the LHS of the first stmt of the next child matches OPRND.
3150 If it does, we found the correct child. Otherwise, we call
3151 vect_get_constant_vectors () and do not advance CHILD_INDEX, in order
3152 to check this child node for the next operand. */
3153 vectorized_defs = false;
3154 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
3156 child = SLP_TREE_CHILDREN (slp_node)[child_index];
3158 /* We have to check both pattern and original def, if available. */
3159 if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
3161 gimple *first_def = SLP_TREE_SCALAR_STMTS (child)[0];
3162 gimple *related
3163 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
3165 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
3166 || (related
3167 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
3169 /* The number of vector defs is determined by the number of
3170 vector statements in the node from which we get those
3171 statements. */
3172 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
3173 vectorized_defs = true;
3174 child_index++;
3177 else
3178 child_index++;
3181 if (!vectorized_defs)
3183 if (i == 0)
3185 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3186 /* Number of vector stmts was calculated according to LHS in
3187 vect_schedule_slp_instance (), fix it by replacing LHS with
3188 RHS, if necessary. See vect_get_smallest_scalar_type () for
3189 details. */
3190 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3191 &rhs_size_unit);
3192 if (rhs_size_unit != lhs_size_unit)
3194 number_of_vects *= rhs_size_unit;
3195 number_of_vects /= lhs_size_unit;
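	      /* An assumed case as an example: if the group stores chars
		 whose RHS values are ints, lhs_size_unit == 1 and
		 rhs_size_unit == 4, so four times as many vectors of the
		 wider RHS type are needed to cover the group.  */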
3200 /* Allocate memory for vectorized defs. */
3201 vec_defs = vNULL;
3202 vec_defs.create (number_of_vects);
3204 /* For reduction defs we call vect_get_constant_vectors (), since we are
3205 looking for initial loop invariant values. */
3206 if (vectorized_defs && reduc_index == -1)
3207 /* The defs are already vectorized. */
3208 vect_get_slp_vect_defs (child, &vec_defs);
3209 else
3210 /* Build vectors from scalar defs. */
3211 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3212 number_of_vects, reduc_index);
3214 vec_oprnds->quick_push (vec_defs);
3216 /* For reductions, we only need initial values. */
3217 if (reduc_index != -1)
3218 return;
3223 /* Create NCOPIES permutation statements using the vector mask MASK and two
3224 input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for
3225 the first copy, shifting the input indices by STRIDE elements of DR_CHAIN
3226 for every copy.
3227 (STRIDE is the length of DR_CHAIN divided by the number of copies; when
3228 MASK is NULL_TREE the requested identity transform is generated instead).
3229 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
3230 the created stmts must be inserted. */
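   /* A sketch with assumed numbers: for NCOPIES == 2, four vector stmts in
      NODE and six vectors in DR_CHAIN, stride_out == 2 and stride_in == 3;
      copy I then reads dr_chain[first_vec_indx + 3 * I] and stores its
      result at slot 2 * I + VECT_STMTS_COUNTER of NODE.  */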
3232 static inline void
3233 vect_create_mask_and_perm (gimple *stmt,
3234 tree mask, int first_vec_indx, int second_vec_indx,
3235 gimple_stmt_iterator *gsi, slp_tree node,
3236 tree vectype, vec<tree> dr_chain,
3237 int ncopies, int vect_stmts_counter)
3239 tree perm_dest;
3240 gimple *perm_stmt = NULL;
3241 int i, stride_in, stride_out;
3242 tree first_vec, second_vec, data_ref;
3244 stride_out = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
3245 stride_in = dr_chain.length () / ncopies;
3247 /* Initialize the vect stmts of NODE to properly insert the generated
3248 stmts later. */
3249 for (i = SLP_TREE_VEC_STMTS (node).length ();
3250 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3251 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3253 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3254 for (i = 0; i < ncopies; i++)
3256 first_vec = dr_chain[first_vec_indx];
3257 second_vec = dr_chain[second_vec_indx];
3259 /* Generate the permute statement if necessary. */
3260 if (mask)
3262 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3263 first_vec, second_vec, mask);
3264 data_ref = make_ssa_name (perm_dest, perm_stmt);
3265 gimple_set_lhs (perm_stmt, data_ref);
3266 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3268 else
3269 /* If mask was NULL_TREE generate the requested identity transform. */
3270 perm_stmt = SSA_NAME_DEF_STMT (first_vec);
3272 /* Store the vector statement in NODE. */
3273 SLP_TREE_VEC_STMTS (node)[stride_out * i + vect_stmts_counter]
3274 = perm_stmt;
3276 first_vec_indx += stride_in;
3277 second_vec_indx += stride_in;
3282 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3283 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3284 permute statements for the SLP node NODE of the SLP instance
3285 SLP_NODE_INSTANCE. */
3287 bool
3288 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3289 gimple_stmt_iterator *gsi, int vf,
3290 slp_instance slp_node_instance, bool analyze_only)
3292 gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3293 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3294 tree mask_element_type = NULL_TREE, mask_type;
3295 int nunits, vec_index = 0;
3296 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3297 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3298 int unroll_factor, mask_element, ncopies;
3299 unsigned char *mask;
3300 machine_mode mode;
3302 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3303 return false;
3305 stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
3307 mode = TYPE_MODE (vectype);
3309 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3310 same size as the vector element being permuted. */
3311 mask_element_type = lang_hooks.types.type_for_mode
3312 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3313 mask_type = get_vectype_for_scalar_type (mask_element_type);
3314 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3315 mask = XALLOCAVEC (unsigned char, nunits);
3316 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3318 /* Number of copies is determined by the final vectorization factor
3319 relative to the SLP_NODE_INSTANCE unrolling factor. */
3320 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3322 /* Generate permutation masks for every NODE. Number of masks for each NODE
3323 is equal to GROUP_SIZE.
3324 E.g., we have a group of three nodes with three loads from the same
3325 location in each node, and the vector size is 4. I.e., we have an
3326 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3327 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3328 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3331 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3332 The last mask is illegal since we assume two operands for the permute
3333 operation, and the mask element values can't be outside that range.
3334 Hence, the last mask must be converted into {2,5,5,5}.
3335 For the first two permutations we need the first and the second input
3336 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3337 we need the second and the third vectors: {b1,c1,a2,b2} and
3338 {c2,a3,b3,c3}. */
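     /* Working out the conversion above with these illustrative numbers:
	the last a-mask selects flat indices 6, 9, 9, 9.  With nunits == 4,
	index 6 is element 6 % 4 == 2 of the second input vector, and index 9
	is element 9 % 4 == 1 of the third, which becomes 1 + nunits == 5 as
	the second permute operand, giving {2, 5, 5, 5}.  */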
3340 int vect_stmts_counter = 0;
3341 int index = 0;
3342 int first_vec_index = -1;
3343 int second_vec_index = -1;
3344 bool noop_p = true;
3346 for (int j = 0; j < unroll_factor; j++)
3348 for (int k = 0; k < group_size; k++)
3350 int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
3351 + j * STMT_VINFO_GROUP_SIZE (stmt_info));
3352 vec_index = i / nunits;
3353 mask_element = i % nunits;
3354 if (vec_index == first_vec_index
3355 || first_vec_index == -1)
3357 first_vec_index = vec_index;
3359 else if (vec_index == second_vec_index
3360 || second_vec_index == -1)
3362 second_vec_index = vec_index;
3363 mask_element += nunits;
3365 else
3367 if (dump_enabled_p ())
3369 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3370 "permutation requires at "
3371 "least three vectors ");
3372 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
3373 stmt, 0);
3374 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3376 return false;
3379 gcc_assert (mask_element >= 0
3380 && mask_element < 2 * nunits);
3381 if (mask_element != index)
3382 noop_p = false;
3383 mask[index++] = mask_element;
3385 if (index == nunits)
3387 if (! noop_p
3388 && ! can_vec_perm_p (mode, false, mask))
3390 if (dump_enabled_p ())
3392 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3393 vect_location,
3394 "unsupported vect permute { ");
3395 for (i = 0; i < nunits; ++i)
3396 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ", mask[i]);
3397 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3399 return false;
3402 if (!analyze_only)
3404 tree mask_vec = NULL_TREE;
3406 if (! noop_p)
3408 tree *mask_elts = XALLOCAVEC (tree, nunits);
3409 for (int l = 0; l < nunits; ++l)
3410 mask_elts[l] = build_int_cst (mask_element_type,
3411 mask[l]);
3412 mask_vec = build_vector (mask_type, mask_elts);
3415 if (second_vec_index == -1)
3416 second_vec_index = first_vec_index;
3417 vect_create_mask_and_perm (stmt, mask_vec, first_vec_index,
3418 second_vec_index,
3419 gsi, node, vectype, dr_chain,
3420 ncopies, vect_stmts_counter++);
3423 index = 0;
3424 first_vec_index = -1;
3425 second_vec_index = -1;
3426 noop_p = true;
3431 return true;
3436 /* Vectorize SLP instance tree in postorder. */
3438 static bool
3439 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3440 unsigned int vectorization_factor)
3442 gimple *stmt;
3443 bool grouped_store, is_store;
3444 gimple_stmt_iterator si;
3445 stmt_vec_info stmt_info;
3446 unsigned int vec_stmts_size, nunits, group_size;
3447 tree vectype;
3448 int i, j;
3449 slp_tree child;
3451 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
3452 return false;
3454 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3455 vect_schedule_slp_instance (child, instance, vectorization_factor);
3457 /* Push SLP node def-type to stmts. */
3458 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3459 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
3460 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
3461 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);
3463 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3464 stmt_info = vinfo_for_stmt (stmt);
3466 /* VECTYPE is the type of the destination. */
3467 vectype = STMT_VINFO_VECTYPE (stmt_info);
3468 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3469 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3471 /* For each SLP instance calculate number of vector stmts to be created
3472 for the scalar stmts in each node of the SLP tree. Number of vector
3473 elements in one vector iteration is the number of scalar elements in
3474 one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
3475 size.
3476 Unless this is a SLP reduction in which case the number of vector
3477 stmts is equal to the number of vector stmts of the children. */
3478 if (GROUP_FIRST_ELEMENT (stmt_info)
3479 && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
3480 vec_stmts_size = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
3481 else
3482 vec_stmts_size = (vectorization_factor * group_size) / nunits;
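  /* E.g. (assumed numbers): with vectorization_factor == 4, group_size == 2
     and nunits == 4, 4 * 2 / 4 == 2 vector stmts are created for the node.  */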
3484 if (!SLP_TREE_VEC_STMTS (node).exists ())
3486 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3487 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3490 if (dump_enabled_p ())
3492 dump_printf_loc (MSG_NOTE,vect_location,
3493 "------>vectorizing SLP node starting from: ");
3494 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3495 dump_printf (MSG_NOTE, "\n");
3498 /* Vectorized stmts go before the last scalar stmt which is where
3499 all uses are ready. */
3500 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3502 /* Mark the first element of the reduction chain as reduction to properly
3503 transform the node. In the analysis phase only the last element of the
3504 chain is marked as reduction. */
3505 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3506 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3508 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3509 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3512 /* Handle two-operation SLP nodes by vectorizing the group with
3513 both operations and then performing a merge. */
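  /* An assumed example: for a group {+, -, +, -} with nunits == 4, the
     per-stmt mask below becomes {0, 1, 0, 1} and the merge mask built from
     it is {0, 5, 2, 7}, selecting even elements from the add result V0 and
     odd elements from the subtract result V1 (an addsub pattern).  */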
3514 if (SLP_TREE_TWO_OPERATORS (node))
3516 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3517 enum tree_code ocode;
3518 gimple *ostmt;
3519 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3520 bool allsame = true;
3521 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3522 if (gimple_assign_rhs_code (ostmt) != code0)
3524 mask[i] = 1;
3525 allsame = false;
3526 ocode = gimple_assign_rhs_code (ostmt);
3528 else
3529 mask[i] = 0;
3530 if (!allsame)
3532 vec<gimple *> v0;
3533 vec<gimple *> v1;
3534 unsigned j;
3535 tree tmask = NULL_TREE;
3536 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3537 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3538 SLP_TREE_VEC_STMTS (node).truncate (0);
3539 gimple_assign_set_rhs_code (stmt, ocode);
3540 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3541 gimple_assign_set_rhs_code (stmt, code0);
3542 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3543 SLP_TREE_VEC_STMTS (node).truncate (0);
3544 tree meltype = build_nonstandard_integer_type
3545 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3546 tree mvectype = get_same_sized_vectype (meltype, vectype);
3547 unsigned k = 0, l;
3548 for (j = 0; j < v0.length (); ++j)
3550 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3551 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3553 if (k >= group_size)
3554 k = 0;
3555 melts[l] = build_int_cst
3556 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3558 tmask = build_vector (mvectype, melts);
3560 /* ??? Not all targets support a VEC_PERM_EXPR with a
3561 constant mask that would translate to a vec_merge RTX
3562 (with their vec_perm_const_ok). We can either not
3563 vectorize in that case or let veclower do its job.
3564 Unfortunately that isn't too great and at least for
3565 plus/minus we'd eventually like to match the target's
3566 vector addsub instructions. */
3567 gimple *vstmt;
3568 vstmt = gimple_build_assign (make_ssa_name (vectype),
3569 VEC_PERM_EXPR,
3570 gimple_assign_lhs (v0[j]),
3571 gimple_assign_lhs (v1[j]), tmask);
3572 vect_finish_stmt_generation (stmt, vstmt, &si);
3573 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3575 v0.release ();
3576 v1.release ();
3577 return false;
3580 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3582 /* Restore stmt def-types. */
3583 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3584 if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
3585 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
3586 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;
3588 return is_store;
3591 /* Replace scalar calls from SLP node NODE by setting their lhs to zero.
3592 For loop vectorization this is done in vectorizable_call, but for SLP
3593 it needs to be deferred until the end of vect_schedule_slp, because multiple
3594 SLP instances may refer to the same scalar stmt. */
3596 static void
3597 vect_remove_slp_scalar_calls (slp_tree node)
3599 gimple *stmt, *new_stmt;
3600 gimple_stmt_iterator gsi;
3601 int i;
3602 slp_tree child;
3603 tree lhs;
3604 stmt_vec_info stmt_info;
3606 if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
3607 return;
3609 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3610 vect_remove_slp_scalar_calls (child);
3612 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3614 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3615 continue;
3616 stmt_info = vinfo_for_stmt (stmt);
3617 if (stmt_info == NULL
3618 || is_pattern_stmt_p (stmt_info)
3619 || !PURE_SLP_STMT (stmt_info))
3620 continue;
3621 lhs = gimple_call_lhs (stmt);
3622 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3623 set_vinfo_for_stmt (new_stmt, stmt_info);
3624 set_vinfo_for_stmt (stmt, NULL);
3625 STMT_VINFO_STMT (stmt_info) = new_stmt;
3626 gsi = gsi_for_stmt (stmt);
3627 gsi_replace (&gsi, new_stmt, false);
3628 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3632 /* Generate vector code for all SLP instances in the loop/basic block. */
3634 bool
3635 vect_schedule_slp (vec_info *vinfo)
3637 vec<slp_instance> slp_instances;
3638 slp_instance instance;
3639 unsigned int i, vf;
3640 bool is_store = false;
3642 slp_instances = vinfo->slp_instances;
3643 if (is_a <loop_vec_info> (vinfo))
3644 vf = as_a <loop_vec_info> (vinfo)->vectorization_factor;
3645 else
3646 vf = 1;
3648 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3650 /* Schedule the tree of INSTANCE. */
3651 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3652 instance, vf);
3653 if (dump_enabled_p ())
3654 dump_printf_loc (MSG_NOTE, vect_location,
3655 "vectorizing stmts using SLP.\n");
3658 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3660 slp_tree root = SLP_INSTANCE_TREE (instance);
3661 gimple *store;
3662 unsigned int j;
3663 gimple_stmt_iterator gsi;
3665 /* Remove scalar call stmts. Do not do this for basic-block
3666 vectorization as not all uses may be vectorized.
3667 ??? Why should this be necessary? DCE should be able to
3668 remove the stmts itself.
3669 ??? For BB vectorization we can as well remove scalar
3670 stmts starting from the SLP tree root if they have no
3671 uses. */
3672 if (is_a <loop_vec_info> (vinfo))
3673 vect_remove_slp_scalar_calls (root);
3675 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3676 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3678 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3679 break;
3681 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3682 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3683 /* Free the attached stmt_vec_info and remove the stmt. */
3684 gsi = gsi_for_stmt (store);
3685 unlink_stmt_vdef (store);
3686 gsi_remove (&gsi, true);
3687 release_defs (store);
3688 free_stmt_vec_info (store);
3692 return is_store;