/* gcc/tree-vect-slp.c  */
/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "target.h"
#include "predict.h"
#include "hard-reg-set.h"
#include "function.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "hashtab.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location on success and UNKNOWN_LOCATION
   otherwise.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
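
/* For illustration, an assumed chain (not from any particular testcase):
   if FIRST_STMT heads the interleaving chain  a[0], a[2], a[3]  with
   GROUP_GAP of the second element equal to 2 and of the third equal to 1,
   the function returns 0, 2 and 3 for the three stmts respectively, and
   -1 for a load that belongs to a different interleaving chain.  */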
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */

static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, bool first,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
    {
again:
      if (first_op_cond)
        {
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
          else
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        }
      else
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              return 1;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");
              return -1;
            }

          switch (gimple_code (def_stmt))
            {
            case GIMPLE_PHI:
              def = gimple_phi_result (def_stmt);
              break;

            case GIMPLE_ASSIGN:
              def = gimple_assign_lhs (def_stmt);
              break;

            default:
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");
              return -1;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd))))
            {
              /* Try swapping operands if we got a mismatch.  */
              if (i == 0
                  && !swapped
                  && commutative)
                {
                  swapped = true;
                  goto again;
                }

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

              return 1;
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return -1;
        }
    }

  /* Swap operands.  */
  if (swapped)
    {
      if (first_op_cond)
        {
          tree cond = gimple_assign_rhs1 (stmt);
          swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                             &TREE_OPERAND (cond, 1));
          TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
        }
      else
        swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                           gimple_assign_rhs2_ptr (stmt));
    }

  return 0;
}
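
/* A hedged illustration of the return protocol above, with made-up stmts:
   for the group  { x = a + b;  y = c + d; }  all defs match and 0 is
   returned; for  { x = a + b;  y = d + c; }  the comparison of operand 0
   initially fails, but since PLUS_EXPR is commutative the operands are
   swapped via the 'again' path and 0 is still returned.  A per-stmt
   mismatch that may yet be fixed by the caller swapping def stmts yields
   1, while an unanalyzable def yields -1 (fatal).  */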
/* Verify that the scalar stmts STMTS are isomorphic, do not require data
   permutation, and are of supported types of operation.  Return
   true if they are, otherwise return false and indicate in *MATCHES
   which stmts are not isomorphic to the first one.  If MATCHES[0]
   is false then this indicates that the comparison could not be
   carried out or that the stmts will never be vectorized by SLP.  */

static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches,
                       bool *two_operators)
{
  unsigned int i;
  gimple first_stmt = stmts[0], stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  tree cond;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      matches[i] = false;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* If populating the vector type requires unrolling then fail
         before adjusting *max_nunits for basic-block vectorization.  */
      if (bb_vinfo
          && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unrolling required "
                           "in basic block SLP\n");
          /* Fatal mismatch.  */
          matches[0] = false;
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && alt_stmt_code == ERROR_MARK)
            alt_stmt_code = rhs_code;
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              /* Handle mismatches in plus/minus by computing both
                 and merging the results.  */
              && !((first_stmt_code == PLUS_EXPR
                    || first_stmt_code == MINUS_EXPR)
                   && (alt_stmt_code == PLUS_EXPR
                       || alt_stmt_code == MINUS_EXPR)
                   && rhs_code == alt_stmt_code)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "original stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    first_stmt, 0);
                }
              /* Mismatch.  */
              continue;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Mismatch.  */
              continue;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              ;
            }
          else
            {
              /* Load.  */
              unsigned unrolling_factor
                = least_common_multiple
                    (*max_nunits, group_size) / group_size;
              /* FORNOW: Check that there is no gap between the loads
                 and no gap between the groups when we need to load
                 multiple groups at once.
                 ???  We should enhance this to only disallow gaps
                 inside vectors.  */
              if ((unrolling_factor > 1
                   && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                        && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                       /* If the group is split up then GROUP_GAP
                          isn't correct here, nor is GROUP_FIRST_ELEMENT.  */
                       || GROUP_SIZE (vinfo_for_stmt (stmt)) > group_size))
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              unsigned ncopies
                = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
              if (loop_vinfo
                  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
                       - GROUP_GAP (vinfo_for_stmt (stmt)))
                      > ncopies * group_size))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Fatal mismatch.  */
                  matches[0] = false;
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Mismatch.  */
                      continue;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                        }
                      /* Fatal mismatch.  */
                      matches[0] = false;
                      return false;
                    }
                }
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              /* Fatal mismatch.  */
              matches[0] = false;
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  /* Mismatch.  */
                  continue;
                }
            }
        }

      matches[i] = true;
    }

  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      return false;

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
    {
      unsigned char *sel
        = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
        {
          sel[i] = i;
          if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
            sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
        }
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
        {
          for (i = 0; i < group_size; ++i)
            if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
              {
                matches[i] = false;
                if (dump_enabled_p ())
                  {
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different operation "
                                     "in stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      stmts[i], 0);
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "original stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      first_stmt, 0);
                  }
              }
          return false;
        }
      *two_operators = true;
    }

  return true;
}
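
/* To illustrate the two-operator path above (a sketch, not lifted from a
   testcase): for the group  { a[0] = b[0] + c[0];  a[1] = b[1] - c[1]; }
   with V4SI vectors, first_stmt_code is PLUS_EXPR and alt_stmt_code
   becomes MINUS_EXPR; both vector operations are later emitted and the
   lanes are blended with the mask  sel = { 0, 5, 2, 7 }  (lanes whose
   scalar stmt has the alternate code select from the second vector), so
   *two_operators is only set when can_vec_perm_p agrees the target
   supports that mask.  */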
/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise,
   return TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple stmt;

  matches[0] = false;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (loop_vinfo, bb_vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, vectorization_factor, matches,
                              &two_operators))
    return false;
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
    {
      loads->safe_push (*node);
      return true;
    }

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
    {
      switch (vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo,
                                           stmt, (i == 0), &oprnds_info))
        {
        case 0:
          break;
        case -1:
          matches[0] = false;
          vect_free_oprnd_info (oprnds_info);
          return false;
        case 1:
          matches[i] = false;
          break;
        }
    }
  for (i = 0; i < group_size; ++i)
    if (!matches[i])
      {
        vect_free_oprnd_info (oprnds_info);
        return false;
      }

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      if (++this_tree_size > max_tree_size)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child)
        {
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                               group_size, max_nunits, loads,
                               vectorization_factor, matches,
                               npermutes, &this_tree_size, max_tree_size))
        {
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);
          continue;
        }

      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ???  This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size...  */
      if (bb_vinfo
          && !matches[0]
          /* ???  Rejecting patterns this way doesn't work.  We'd have to
             do extra work to cancel the pattern so the uses see the
             scalar version.  */
          && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          dump_printf_loc (MSG_NOTE, vect_location,
                           "Building vector operands from scalars\n");
          oprnd_info->def_stmts = vNULL;
          vect_free_slp_tree (child);
          SLP_TREE_CHILDREN (*node).quick_push (NULL);
          continue;
        }

      /* If the SLP build for operand zero failed and operand zero
         and one can be commutated try that for the scalar stmts
         that failed the match.  */
      if (i == 0
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          && matches[0]
          /* ???  For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && nops == 2
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          && !SLP_TREE_TWO_OPERATORS (*node)
          /* Do so only if the number of not successful permutes was not
             more than a cut-off as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
          && *npermutes < 4)
        {
          unsigned int j;
          slp_tree grandchild;

          /* Roll back.  */
          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (j = 0; j < group_size; ++j)
            if (!matches[j])
              {
                gimple tem = oprnds_info[0]->def_stmts[j];
                oprnds_info[0]->def_stmts[j] = oprnds_info[1]->def_stmts[j];
                oprnds_info[1]->def_stmts[j] = tem;
                dump_printf (MSG_NOTE, "%d ", j);
              }
          dump_printf (MSG_NOTE, "\n");
          /* And try again with scratch 'matches' ...  */
          bool *tem = XALLOCAVEC (bool, group_size);
          if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &child,
                                   group_size, max_nunits, loads,
                                   vectorization_factor,
                                   tem, npermutes, &this_tree_size,
                                   max_tree_size))
            {
              /* ... so if successful we can apply the operand swapping
                 to the GIMPLE IL.  This is necessary because for example
                 vect_get_slp_defs uses operand indexes and thus expects
                 canonical operand order.  */
              for (j = 0; j < group_size; ++j)
                if (!matches[j])
                  {
                    gimple stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                    swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                       gimple_assign_rhs2_ptr (stmt));
                  }
              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);
              continue;
            }

          ++*npermutes;
        }

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);
      return false;
    }

  if (tree_size)
    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
  return true;
}
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
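
/* Note the direction of the permutation above:
   tmp_stmts[permutation[i]] = stmt  means stmt i moves *to* slot
   permutation[i].  E.g. (an assumed example) with stmts  {s0, s1, s2, s3}
   and permutation  {2, 0, 3, 1}  the node ends up as  {s1, s3, s0, s2}.  */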
/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
{
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  sbitmap load_index;
  slp_tree node;
  gimple stmt, load, next_load, first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
        else
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ???  Can't we assert this?  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      slp_tree load;
      unsigned int lidx;

      /* Compare all the permutation sequences to the first one.  We know
         that at least one load is permuted.  */
      node = SLP_INSTANCE_LOADS (slp_instn)[0];
      if (!node->load_permutation.exists ())
        return false;
      for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
        {
          if (!load->load_permutation.exists ())
            return false;
          FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
            if (lidx != node->load_permutation[j])
              return false;
        }

      /* Check that the loads in the first sequence are different and there
         are no gaps between them.  */
      load_index = sbitmap_alloc (group_size);
      bitmap_clear (load_index);
      FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
        {
          if (bitmap_bit_p (load_index, lidx))
            {
              sbitmap_free (load_index);
              return false;
            }
          bitmap_set_bit (load_index, lidx);
        }
      for (i = 0; i < group_size; i++)
        if (!bitmap_bit_p (load_index, i))
          {
            sbitmap_free (load_index);
            return false;
          }
      sbitmap_free (load_index);

      /* This permutation is valid for reduction.  Since the order of the
         statements in the nodes is not important unless they are memory
         accesses, we can rearrange the statements in all the nodes
         according to the order of the loads.  */
      vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                node->load_permutation);

      /* We are done, no actual permutations need to be generated.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        SLP_TREE_LOAD_PERMUTATION (node).release ();
      return true;
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
    {
      /* Check whether the loads in an instance form a subchain and thus
         no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
            continue;
          bool subchain_p = true;
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j != 0 && next_load != load)
                {
                  subchain_p = false;
                  break;
                }
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }
          if (subchain_p)
            SLP_TREE_LOAD_PERMUTATION (node).release ();
          else
            {
              /* Verify the permutation can be generated.  */
              vec<tree> tem;
              if (!vect_transform_slp_perm_load (node, tem, NULL,
                                                 1, slp_instn, true))
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                   vect_location,
                                   "unsupported load permutation\n");
                  return false;
                }
            }
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.
         ???  This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          first_load = SLP_TREE_SCALAR_STMTS (node)[0];
          if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
            {
              dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
              if (vect_supportable_dr_alignment (dr, false)
                  == dr_unaligned_unsupported)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported unaligned load ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        first_load, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                    }
                  return false;
                }
            }
        }

      return true;
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
  if (SLP_INSTANCE_LOADS (slp_instn).length () != group_size)
    return false;
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (!node->load_permutation.exists ())
      return false;

  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      unsigned int lidx = node->load_permutation[0];
      if (bitmap_bit_p (load_index, lidx))
        {
          sbitmap_free (load_index);
          return false;
        }
      bitmap_set_bit (load_index, lidx);
      FOR_EACH_VEC_ELT (node->load_permutation, j, k)
        if (k != lidx)
          {
            sbitmap_free (load_index);
            return false;
          }
    }
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
      {
        sbitmap_free (load_index);
        return false;
      }
  sbitmap_free (load_index);

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, vNULL, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
      return false;

  return true;
}
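
/* For illustration (an assumed example): a load node reading
   {a[1], a[0], a[3], a[2]}  from a chain starting at a[0] carries the
   load permutation  {1, 0, 3, 2};  in BB SLP the subchain test above
   instead lets e.g.  {a[1], a[2]}  through with no permutation at all,
   since consecutive elements of the chain need no shuffle.  */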
/* Find the last scalar stmt in NODE (e.g. the last store of an SLP
   instance root).  */

static gimple
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple last = NULL, stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
        last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
        last = get_later_stmt (stmt, last);
    }

  return last;
}
/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
{
  stmt_vector_for_cost *body_cost_vec = &SLP_INSTANCE_BODY_COST_VEC (instance);

  unsigned i;
  slp_tree child;
  gimple stmt, s;
  stmt_vec_info stmt_info;
  tree lhs;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (child)
      vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                               instance, child, prologue_cost_vec,
                               ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);
      else
        {
          int i;
          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);
          /* If the load is permuted record the cost for the permutation.
             ???  Loads from multiple chains are let through here only
             for a single special case involving complex numbers where
             in the end no permutation is necessary.  */
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
            if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
                 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
                && vect_get_place_in_interleaving_chain
                     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
              {
                record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                  stmt_info, 0, vect_body);
                break;
              }
        }
    }
  else
    {
      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                        stmt_info, 0, vect_body);
      if (SLP_TREE_TWO_OPERATORS (node))
        {
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                            stmt_info, 0, vect_body);
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                            stmt_info, 0, vect_body);
        }
    }

  /* Scan operands and account for prologue cost of constants/externals.
     ???  This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
    {
      tree def, op = gimple_op (stmt, i);
      gimple def_stmt;
      enum vect_def_type dt;
      if (!op || op == lhs)
        continue;
      if (vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo,
                              &def_stmt, &def, &dt))
        {
          /* Without looking at the actual initializer a vector of
             constants can be implemented as load from the constant pool.
             ???  We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          if (dt == vect_constant_def)
            record_stmt_cost (prologue_cost_vec, 1, vector_load,
                              stmt_info, 0, vect_prologue);
          else if (dt == vect_external_def)
            record_stmt_cost (prologue_cost_vec, 1, vec_construct,
                              stmt_info, 0, vect_prologue);
        }
    }
}
/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
{
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;
  unsigned i;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
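
  /* A worked example of the formula (values assumed for illustration):
     with NUNITS == 4 and GROUP_SIZE == 8 this gives lcm (4, 8) / 4 == 2
     vector stmts per scalar group; with NUNITS == 8 and GROUP_SIZE == 4
     it gives lcm (8, 4) / 8 == 1.  */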
  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  SLP_INSTANCE_BODY_COST_VEC (instance) = body_cost_vec;
  vect_analyze_slp_cost_1 (loop_vinfo, bb_vinfo,
                           instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  Unlike the body costs, we know
     the final values now regardless of the loop vectorization factor.  */
  void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                : BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
    {
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);
    }

  prologue_cost_vec.release ();
}
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int i;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
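
  /* A worked example (values assumed for illustration): with NUNITS == 4
     and GROUP_SIZE == 2 this gives lcm (4, 2) / 2 == 2, i.e. two copies
     of the group are needed to fill one vector; with GROUP_SIZE == 8 it
     gives lcm (4, 8) / 8 == 1 and no unrolling is required.  */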
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &max_nunits, &loads,
                           vectorization_factor, matches, &npermutes, NULL,
                           max_tree_size))
    {
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = vNULL;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      slp_tree load_node;
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
        {
          vec<unsigned> load_permutation;
          int j;
          gimple load, first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
            {
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
            }
          if (!this_load_permuted)
            {
              load_permutation.release ();
              continue;
            }
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;
        }

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              vect_free_slp_instance (new_instance);
              return false;
            }
        }

      if (loop_vinfo)
        {
          /* Compute the costs of this SLP instance.  Delay this for BB
             vectorization as we don't have vector types computed yet.  */
          vect_analyze_slp_cost (loop_vinfo, bb_vinfo,
                                 new_instance, TYPE_VECTOR_SUBPARTS (vectype));
          LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
        }
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  loads.release ();

  return false;
}
/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                  unsigned max_tree_size)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                   max_tree_size))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.\n");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element,
                                       max_tree_size))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0],
                                    max_tree_size))
    ok = true;

  return true;
}
/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}
1988 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1989 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
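/* Illustrative example (an assumption, not taken from the original sources):
   in

     for (i = 0; i < n; i++)
       { b[2*i] = x[2*i] * 3; b[2*i+1] = x[2*i+1] * 3; sum += x[2*i]; }

   the two stores to b[] form an SLP group whose tree contains the loads
   from x[], but the load x[2*i] also feeds the non-SLP reduction SUM, so
   that load must be vectorized both as part of the SLP tree and by
   loop-based vectorization and is marked HYBRID.  */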
1991 static void
1992 vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1994 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[i];
1995 imm_use_iterator imm_iter;
1996 gimple use_stmt;
1997 stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
1998 slp_tree child;
1999 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
2000 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2001 int j;
2003 /* Propagate hybrid down the SLP tree. */
2004 if (stype == hybrid)
2006 else if (HYBRID_SLP_STMT (stmt_vinfo))
2007 stype = hybrid;
2008 else
2010 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
2011 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
2012 if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
2013 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
2014 if (gimple_bb (use_stmt)
2015 && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
2016 && (use_vinfo = vinfo_for_stmt (use_stmt))
2017 && !STMT_SLP_TYPE (use_vinfo)
2018 && (STMT_VINFO_RELEVANT (use_vinfo)
2019 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo))
2020 || (STMT_VINFO_IN_PATTERN_P (use_vinfo)
2021 && STMT_VINFO_RELATED_STMT (use_vinfo)
2022 && !STMT_SLP_TYPE (vinfo_for_stmt
2023 (STMT_VINFO_RELATED_STMT (use_vinfo)))))
2024 && !(gimple_code (use_stmt) == GIMPLE_PHI
2025 && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
2026 stype = hybrid;
2029 if (stype == hybrid)
2030 STMT_SLP_TYPE (stmt_vinfo) = hybrid;
2032 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
2033 if (child)
2034 vect_detect_hybrid_slp_stmts (child, i, stype);
2037 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2039 static tree
2040 vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
2042 walk_stmt_info *wi = (walk_stmt_info *)data;
2043 struct loop *loopp = (struct loop *)wi->info;
2045 if (wi->is_lhs)
2046 return NULL_TREE;
2048 if (TREE_CODE (*tp) == SSA_NAME
2049 && !SSA_NAME_IS_DEFAULT_DEF (*tp))
2051 gimple def_stmt = SSA_NAME_DEF_STMT (*tp);
2052 if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
2053 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
2054 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
2057 return NULL_TREE;
2060 static tree
2061 vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
2062 walk_stmt_info *)
2064 /* If the stmt is in an SLP instance then this isn't a reason
2065 to mark use definitions in other SLP instances as hybrid. */
2066 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
2067 *handled = true;
2068 return NULL_TREE;
2071 /* Find stmts that must be both vectorized and SLPed. */
2073 void
2074 vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2076 unsigned int i;
2077 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2078 slp_instance instance;
2080 if (dump_enabled_p ())
2081 dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
2082 "\n");
2084 /* First walk all pattern stmts in the loop and mark the defs they use as
2085 hybrid, because immediate uses inside pattern stmts are not recorded. */
2086 for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
2088 basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
2089 for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
2090 gsi_next (&gsi))
2092 gimple stmt = gsi_stmt (gsi);
2093 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2094 if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2096 walk_stmt_info wi;
2097 memset (&wi, 0, sizeof (wi));
2098 wi.info = LOOP_VINFO_LOOP (loop_vinfo);
2099 gimple_stmt_iterator gsi2
2100 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
2101 walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
2102 vect_detect_hybrid_slp_1, &wi);
2103 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
2104 vect_detect_hybrid_slp_2,
2105 vect_detect_hybrid_slp_1, &wi);
2110 /* Then walk the SLP instance trees marking stmts with uses in
2111 non-SLP stmts as hybrid, also propagating hybrid down the
2112 SLP tree, collecting the above info on-the-fly. */
2113 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2115 for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
2116 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
2117 i, pure_slp);
2122 /* Create and initialize a new bb_vec_info struct for BB, as well as
2123 stmt_vec_info structs for all the stmts in it. */
2125 static bb_vec_info
2126 new_bb_vec_info (basic_block bb)
2128 bb_vec_info res = NULL;
2129 gimple_stmt_iterator gsi;
2131 res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
2132 BB_VINFO_BB (res) = bb;
2134 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2136 gimple stmt = gsi_stmt (gsi);
2137 gimple_set_uid (stmt, 0);
2138 set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
2141 BB_VINFO_GROUPED_STORES (res).create (10);
2142 BB_VINFO_SLP_INSTANCES (res).create (2);
2143 BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
2145 bb->aux = res;
2146 return res;
2150 /* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
2151 stmts in the basic block. */
2153 static void
2154 destroy_bb_vec_info (bb_vec_info bb_vinfo)
2156 vec<slp_instance> slp_instances;
2157 slp_instance instance;
2158 basic_block bb;
2159 gimple_stmt_iterator si;
2160 unsigned i;
2162 if (!bb_vinfo)
2163 return;
2165 bb = BB_VINFO_BB (bb_vinfo);
2167 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
2169 gimple stmt = gsi_stmt (si);
2170 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2172 if (stmt_info)
2173 /* Free stmt_vec_info. */
2174 free_stmt_vec_info (stmt);
2177 vect_destroy_datarefs (NULL, bb_vinfo);
2178 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
2179 BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
2180 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2181 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2182 vect_free_slp_instance (instance);
2183 BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
2184 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
2185 free (bb_vinfo);
2186 bb->aux = NULL;
2190 /* Analyze statements contained in SLP tree node after recursively analyzing
2191 the subtree. Return TRUE if the operations are supported. */
2193 static bool
2194 vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
2196 bool dummy;
2197 int i;
2198 gimple stmt;
2199 slp_tree child;
2201 if (!node)
2202 return true;
2204 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2205 if (!vect_slp_analyze_node_operations (bb_vinfo, child))
2206 return false;
2208 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2210 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
2211 gcc_assert (stmt_info);
2212 gcc_assert (PURE_SLP_STMT (stmt_info));
2214 if (!vect_analyze_stmt (stmt, &dummy, node))
2215 return false;
2218 return true;
2222 /* Analyze statements in SLP instances of the basic block. Return TRUE if the
2223 operations are supported. */
2225 static bool
2226 vect_slp_analyze_operations (bb_vec_info bb_vinfo)
2228 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2229 slp_instance instance;
2230 int i;
2232 for (i = 0; slp_instances.iterate (i, &instance); )
2234 if (!vect_slp_analyze_node_operations (bb_vinfo,
2235 SLP_INSTANCE_TREE (instance)))
2237 vect_free_slp_instance (instance);
2238 slp_instances.ordered_remove (i);
2240 else
2241 i++;
2244 if (!slp_instances.length ())
2245 return false;
2247 return true;
2251 /* Compute the scalar cost of the SLP node NODE and its children
2252 and return it. Do not account for defs that are marked in LIFE, and
2253 update LIFE according to uses of NODE. */
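/* Illustrative note (not part of the original file): LIFE has one entry per
   scalar stmt of the instance's group; entry I is set below when stmt I has
   a use in a PHI, outside the basic block, or in a non-vectorizable stmt,
   in which case the scalar stmt stays live and neither it nor the defs it
   requires are counted towards the scalar cost.  */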
2255 static unsigned
2256 vect_bb_slp_scalar_cost (basic_block bb,
2257 slp_tree node, vec<bool, va_heap> *life)
2259 unsigned scalar_cost = 0;
2260 unsigned i;
2261 gimple stmt;
2262 slp_tree child;
2264 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
2266 unsigned stmt_cost;
2267 ssa_op_iter op_iter;
2268 def_operand_p def_p;
2269 stmt_vec_info stmt_info;
2271 if ((*life)[i])
2272 continue;
2274 /* If there is a non-vectorized use of the defs then the scalar
2275 stmt is kept live, in which case we do not account for it, or for
2276 any required defs in the SLP children, in the scalar cost. This
2277 way we make the vectorization look more costly when compared to
2278 the scalar cost. */
2279 FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
2281 imm_use_iterator use_iter;
2282 gimple use_stmt;
2283 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
2284 if (!is_gimple_debug (use_stmt)
2285 && (gimple_code (use_stmt) == GIMPLE_PHI
2286 || gimple_bb (use_stmt) != bb
2287 || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
2289 (*life)[i] = true;
2290 BREAK_FROM_IMM_USE_STMT (use_iter);
2293 if ((*life)[i])
2294 continue;
2296 stmt_info = vinfo_for_stmt (stmt);
2297 if (STMT_VINFO_DATA_REF (stmt_info))
2299 if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
2300 stmt_cost = vect_get_stmt_cost (scalar_load);
2301 else
2302 stmt_cost = vect_get_stmt_cost (scalar_store);
2304 else
2305 stmt_cost = vect_get_stmt_cost (scalar_stmt);
2307 scalar_cost += stmt_cost;
2310 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
2311 if (child)
2312 scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
2314 return scalar_cost;
2317 /* Check if vectorization of the basic block is profitable. */
2319 static bool
2320 vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
2322 vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2323 slp_instance instance;
2324 int i, j;
2325 unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
2326 unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
2327 void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
2328 stmt_vec_info stmt_info = NULL;
2329 stmt_vector_for_cost body_cost_vec;
2330 stmt_info_for_cost *ci;
2332 /* Calculate vector costs. */
2333 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2335 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2337 FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
2339 stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
2340 (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
2341 stmt_info, ci->misalign, vect_body);
2345 /* Calculate scalar cost. */
2346 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2348 auto_vec<bool, 20> life;
2349 life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
2350 scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
2351 SLP_INSTANCE_TREE (instance),
2352 &life);
2355 /* Complete the target-specific cost calculation. */
2356 finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
2357 &vec_inside_cost, &vec_epilogue_cost);
2359 vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
2361 if (dump_enabled_p ())
2363 dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
2364 dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
2365 vec_inside_cost);
2366 dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
2367 dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
2368 dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
2371 /* Vectorization is profitable if its cost is less than the cost of the
2372 scalar version. */
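/* Worked example (illustrative numbers only): with vec_prologue_cost 2,
   vec_epilogue_cost 1 and vec_inside_cost 6, the test below keeps the
   vectorized version only when 2 + 1 + 6 < scalar_cost, i.e. when the
   scalar block costs at least 10.  */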
2373 if (vec_outside_cost + vec_inside_cost >= scalar_cost)
2374 return false;
2376 return true;
2379 /* Check if the basic block can be vectorized. */
2381 static bb_vec_info
2382 vect_slp_analyze_bb_1 (basic_block bb)
2384 bb_vec_info bb_vinfo;
2385 vec<slp_instance> slp_instances;
2386 slp_instance instance;
2387 int i;
2388 int min_vf = 2;
2389 unsigned n_stmts = 0;
2391 bb_vinfo = new_bb_vec_info (bb);
2392 if (!bb_vinfo)
2393 return NULL;
2395 if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
2397 if (dump_enabled_p ())
2398 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2399 "not vectorized: unhandled data-ref in basic "
2400 "block.\n");
2402 destroy_bb_vec_info (bb_vinfo);
2403 return NULL;
2406 if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
2408 if (dump_enabled_p ())
2409 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2410 "not vectorized: not enough data-refs in "
2411 "basic block.\n");
2413 destroy_bb_vec_info (bb_vinfo);
2414 return NULL;
2417 if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
2419 if (dump_enabled_p ())
2420 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2421 "not vectorized: unhandled data access in "
2422 "basic block.\n");
2424 destroy_bb_vec_info (bb_vinfo);
2425 return NULL;
2428 vect_pattern_recog (NULL, bb_vinfo);
2430 if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
2432 if (dump_enabled_p ())
2433 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2434 "not vectorized: bad data alignment in basic "
2435 "block.\n");
2437 destroy_bb_vec_info (bb_vinfo);
2438 return NULL;
2441 /* Check the SLP opportunities in the basic block, analyze and build SLP
2442 trees. */
2443 if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
2445 if (dump_enabled_p ())
2446 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2447 "not vectorized: failed to find SLP opportunities "
2448 "in basic block.\n");
2450 destroy_bb_vec_info (bb_vinfo);
2451 return NULL;
2454 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
2456 /* Mark all the statements that we want to vectorize as pure SLP and
2457 relevant. */
2458 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2460 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
2461 vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
2464 /* Mark all the statements that we do not want to vectorize. */
2465 for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
2466 !gsi_end_p (gsi); gsi_next (&gsi))
2468 stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
2469 if (STMT_SLP_TYPE (vinfo) != pure_slp)
2470 STMT_VINFO_VECTORIZABLE (vinfo) = false;
2473 /* Analyze dependences. At this point all stmts not participating in
2474 vectorization have to be marked. Dependence analysis assumes
2475 that we either vectorize all SLP instances or none at all. */
2476 if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
2478 if (dump_enabled_p ())
2479 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2480 "not vectorized: unhandled data dependence "
2481 "in basic block.\n");
2483 destroy_bb_vec_info (bb_vinfo);
2484 return NULL;
2487 if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
2489 if (dump_enabled_p ())
2490 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2491 "not vectorized: unsupported alignment in basic "
2492 "block.\n");
2493 destroy_bb_vec_info (bb_vinfo);
2494 return NULL;
2497 if (!vect_slp_analyze_operations (bb_vinfo))
2499 if (dump_enabled_p ())
2500 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2501 "not vectorized: bad operation in basic block.\n");
2503 destroy_bb_vec_info (bb_vinfo);
2504 return NULL;
2507 /* Compute the costs of the SLP instances. */
2508 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2510 gimple stmt = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
2511 tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
2512 vect_analyze_slp_cost (NULL, bb_vinfo,
2513 instance, TYPE_VECTOR_SUBPARTS (vectype));
2516 /* Cost model: check if the vectorization is worthwhile. */
2517 if (!unlimited_cost_model (NULL)
2518 && !vect_bb_vectorization_profitable_p (bb_vinfo))
2520 if (dump_enabled_p ())
2521 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2522 "not vectorized: vectorization is not "
2523 "profitable.\n");
2525 destroy_bb_vec_info (bb_vinfo);
2526 return NULL;
2529 if (dump_enabled_p ())
2530 dump_printf_loc (MSG_NOTE, vect_location,
2531 "Basic block will be vectorized using SLP\n");
2533 return bb_vinfo;
2537 bb_vec_info
2538 vect_slp_analyze_bb (basic_block bb)
2540 bb_vec_info bb_vinfo;
2541 int insns = 0;
2542 gimple_stmt_iterator gsi;
2543 unsigned int vector_sizes;
2545 if (dump_enabled_p ())
2546 dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");
2548 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
2550 gimple stmt = gsi_stmt (gsi);
2551 if (!is_gimple_debug (stmt)
2552 && !gimple_nop_p (stmt)
2553 && gimple_code (stmt) != GIMPLE_LABEL)
2554 insns++;
2557 if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
2559 if (dump_enabled_p ())
2560 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2561 "not vectorized: too many instructions in "
2562 "basic block.\n");
2564 return NULL;
2567 /* Autodetect the first vector size we try. */
2568 current_vector_size = 0;
2569 vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
2571 while (1)
2573 bb_vinfo = vect_slp_analyze_bb_1 (bb);
2574 if (bb_vinfo)
2575 return bb_vinfo;
2577 destroy_bb_vec_info (bb_vinfo);
2579 vector_sizes &= ~current_vector_size;
2580 if (vector_sizes == 0
2581 || current_vector_size == 0)
2582 return NULL;
2584 /* Try the next biggest vector size. */
2585 current_vector_size = 1 << floor_log2 (vector_sizes);
2586 if (dump_enabled_p ())
2587 dump_printf_loc (MSG_NOTE, vect_location,
2588 "***** Re-trying analysis with "
2589 "vector size %d\n", current_vector_size);
2594 /* SLP costs are calculated according to the SLP instance unrolling factor
2595 (i.e., the number of created vector stmts depends on the unrolling factor).
2596 However, the actual number of vector stmts for every SLP node depends on
2597 VF, which is set later in vect_analyze_operations (). Hence, SLP costs
2598 should be updated. In this function we assume that the inside costs
2599 calculated in vect_model_xxx_cost are linear in ncopies. */
2601 void
2602 vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
2604 unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2605 vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
2606 slp_instance instance;
2607 stmt_vector_for_cost body_cost_vec;
2608 stmt_info_for_cost *si;
2609 void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
2611 if (dump_enabled_p ())
2612 dump_printf_loc (MSG_NOTE, vect_location,
2613 "=== vect_update_slp_costs_according_to_vf ===\n");
2615 FOR_EACH_VEC_ELT (slp_instances, i, instance)
2617 /* We assume that costs are linear in ncopies. */
2618 int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
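/* Worked example (illustrative): with VF == 8 and an instance unrolling
   factor of 2, ncopies == 4, so every cost entry recorded for one SLP
   copy is charged four times below.  */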
2620 /* Record the instance's instructions in the target cost model.
2621 This was delayed until here because the count of instructions
2622 isn't known beforehand. */
2623 body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);
2625 FOR_EACH_VEC_ELT (body_cost_vec, j, si)
2626 (void) add_stmt_cost (data, si->count * ncopies, si->kind,
2627 vinfo_for_stmt (si->stmt), si->misalign,
2628 vect_body);
2633 /* For constant and loop invariant defs of SLP_NODE this function returns
2634 (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
2635 OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
2636 scalar stmts. NUMBER_OF_VECTORS is the number of vector defs to create.
2637 REDUC_INDEX is the index of the reduction operand in the statements, unless
2638 it is -1. */
2640 static void
2641 vect_get_constant_vectors (tree op, slp_tree slp_node,
2642 vec<tree> *vec_oprnds,
2643 unsigned int op_num, unsigned int number_of_vectors,
2644 int reduc_index)
2646 vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
2647 gimple stmt = stmts[0];
2648 stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
2649 unsigned nunits;
2650 tree vec_cst;
2651 tree *elts;
2652 unsigned j, number_of_places_left_in_vector;
2653 tree vector_type;
2654 tree vop;
2655 int group_size = stmts.length ();
2656 unsigned int vec_num, i;
2657 unsigned number_of_copies = 1;
2658 vec<tree> voprnds;
2659 voprnds.create (number_of_vectors);
2660 bool constant_p, is_store;
2661 tree neutral_op = NULL;
2662 enum tree_code code = gimple_expr_code (stmt);
2663 gimple def_stmt;
2664 struct loop *loop;
2665 gimple_seq ctor_seq = NULL;
2667 if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
2668 && reduc_index != -1)
2670 op_num = reduc_index - 1;
2671 op = gimple_op (stmt, reduc_index);
2672 /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
2673 we need either neutral operands or the original operands. See
2674 get_initial_def_for_reduction() for details. */
2675 switch (code)
2677 case WIDEN_SUM_EXPR:
2678 case DOT_PROD_EXPR:
2679 case PLUS_EXPR:
2680 case MINUS_EXPR:
2681 case BIT_IOR_EXPR:
2682 case BIT_XOR_EXPR:
2683 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2684 neutral_op = build_real (TREE_TYPE (op), dconst0);
2685 else
2686 neutral_op = build_int_cst (TREE_TYPE (op), 0);
2688 break;
2690 case MULT_EXPR:
2691 if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
2692 neutral_op = build_real (TREE_TYPE (op), dconst1);
2693 else
2694 neutral_op = build_int_cst (TREE_TYPE (op), 1);
2696 break;
2698 case BIT_AND_EXPR:
2699 neutral_op = build_int_cst (TREE_TYPE (op), -1);
2700 break;
2702 /* For MIN/MAX we don't have an easy neutral operand but
2703 the initial values can be used fine here. Only for
2704 a reduction chain do we have to force a neutral element. */
2705 case MAX_EXPR:
2706 case MIN_EXPR:
2707 if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
2708 neutral_op = NULL;
2709 else
2711 def_stmt = SSA_NAME_DEF_STMT (op);
2712 loop = (gimple_bb (stmt))->loop_father;
2713 neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2714 loop_preheader_edge (loop));
2716 break;
2718 default:
2719 neutral_op = NULL;
2723 if (STMT_VINFO_DATA_REF (stmt_vinfo))
2725 is_store = true;
2726 op = gimple_assign_rhs1 (stmt);
2728 else
2729 is_store = false;
2731 gcc_assert (op);
2733 if (CONSTANT_CLASS_P (op))
2734 constant_p = true;
2735 else
2736 constant_p = false;
2738 vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
2739 gcc_assert (vector_type);
2740 nunits = TYPE_VECTOR_SUBPARTS (vector_type);
2742 /* NUMBER_OF_COPIES is the number of times we need to use the same values in
2743 created vectors. It is greater than 1 if unrolling is performed.
2745 For example, we have two scalar operands, s1 and s2 (e.g., a group of
2746 strided accesses of size two), while NUNITS is four (i.e., four scalars
2747 of this type can be packed in a vector). The output vector will contain
2748 two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
2749 will be 2).
2751 If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
2752 containing the operands.
2754 For example, NUNITS is four as before, and the group size is 8
2755 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
2756 {s5, s6, s7, s8}. */
2758 number_of_copies = least_common_multiple (nunits, group_size) / group_size;
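/* Worked examples (illustrative): nunits == 4 and group_size == 2 give
   lcm (4, 2) / 2 == 2 copies, i.e. one vector {s1, s2, s1, s2};
   nunits == 4 and group_size == 6 give lcm (4, 6) / 6 == 2 copies
   spread over three four-element vectors.  */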
2760 number_of_places_left_in_vector = nunits;
2761 elts = XALLOCAVEC (tree, nunits);
2762 bool place_after_defs = false;
2763 for (j = 0; j < number_of_copies; j++)
2765 for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
2767 if (is_store)
2768 op = gimple_assign_rhs1 (stmt);
2769 else
2771 switch (code)
2773 case COND_EXPR:
2774 if (op_num == 0 || op_num == 1)
2776 tree cond = gimple_assign_rhs1 (stmt);
2777 op = TREE_OPERAND (cond, op_num);
2779 else
2781 if (op_num == 2)
2782 op = gimple_assign_rhs2 (stmt);
2783 else
2784 op = gimple_assign_rhs3 (stmt);
2786 break;
2788 case CALL_EXPR:
2789 op = gimple_call_arg (stmt, op_num);
2790 break;
2792 case LSHIFT_EXPR:
2793 case RSHIFT_EXPR:
2794 case LROTATE_EXPR:
2795 case RROTATE_EXPR:
2796 op = gimple_op (stmt, op_num + 1);
2797 /* Unlike the other binary operators, shifts/rotates have
2798 the shift count as an int rather than the same type as
2799 the lhs, so make sure the scalar is the right type if
2800 we are dealing with vectors of
2801 long long/long/short/char. */
2802 if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
2803 op = fold_convert (TREE_TYPE (vector_type), op);
2804 break;
2806 default:
2807 op = gimple_op (stmt, op_num + 1);
2808 break;
2812 if (reduc_index != -1)
2814 loop = (gimple_bb (stmt))->loop_father;
2815 def_stmt = SSA_NAME_DEF_STMT (op);
2817 gcc_assert (loop);
2819 /* Get the def before the loop. In a reduction chain we have only
2820 one initial value. */
2821 if ((j != (number_of_copies - 1)
2822 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
2823 && i != 0))
2824 && neutral_op)
2825 op = neutral_op;
2826 else
2827 op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
2828 loop_preheader_edge (loop));
2831 /* Create 'vect_ = {op0,op1,...,opn}'. */
2832 number_of_places_left_in_vector--;
2833 tree orig_op = op;
2834 if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
2836 if (CONSTANT_CLASS_P (op))
2838 op = fold_unary (VIEW_CONVERT_EXPR,
2839 TREE_TYPE (vector_type), op);
2840 gcc_assert (op && CONSTANT_CLASS_P (op));
2842 else
2844 tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
2845 gimple init_stmt;
2846 op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
2847 init_stmt
2848 = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
2849 gimple_seq_add_stmt (&ctor_seq, init_stmt);
2850 op = new_temp;
2853 elts[number_of_places_left_in_vector] = op;
2854 if (!CONSTANT_CLASS_P (op))
2855 constant_p = false;
2856 if (TREE_CODE (orig_op) == SSA_NAME
2857 && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
2858 && STMT_VINFO_BB_VINFO (stmt_vinfo)
2859 && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
2860 == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
2861 place_after_defs = true;
2863 if (number_of_places_left_in_vector == 0)
2865 number_of_places_left_in_vector = nunits;
2867 if (constant_p)
2868 vec_cst = build_vector (vector_type, elts);
2869 else
2871 vec<constructor_elt, va_gc> *v;
2872 unsigned k;
2873 vec_alloc (v, nunits);
2874 for (k = 0; k < nunits; ++k)
2875 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
2876 vec_cst = build_constructor (vector_type, v);
2878 tree init;
2879 gimple_stmt_iterator gsi;
2880 if (place_after_defs)
2882 gsi = gsi_for_stmt
2883 (vect_find_last_scalar_stmt_in_slp (slp_node));
2884 init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
2886 else
2887 init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
2888 if (ctor_seq != NULL)
2890 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
2891 gsi_insert_seq_before_without_update (&gsi, ctor_seq,
2892 GSI_SAME_STMT);
2893 ctor_seq = NULL;
2895 voprnds.quick_push (init);
2896 place_after_defs = false;
2901 /* Since the vectors are created in reverse order, we have to
2902 reverse them here. */
2903 vec_num = voprnds.length ();
2904 for (j = vec_num; j != 0; j--)
2906 vop = voprnds[j - 1];
2907 vec_oprnds->quick_push (vop);
2910 voprnds.release ();
2912 /* In case VF is greater than the unrolling factor needed for the SLP
2913 group of stmts, the NUMBER_OF_VECTORS to be created is greater than
2914 NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
2915 to replicate the vectors. */
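/* Illustrative note (not in the original): if two vectors were built but
   NUMBER_OF_VECTORS is 4 because VF exceeds the SLP unrolling factor, the
   loop below pushes either two neutral-element vectors (for reductions
   with a known neutral operand) or the two existing vectors again.  */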
2916 while (number_of_vectors > vec_oprnds->length ())
2918 tree neutral_vec = NULL;
2920 if (neutral_op)
2922 if (!neutral_vec)
2923 neutral_vec = build_vector_from_val (vector_type, neutral_op);
2925 vec_oprnds->quick_push (neutral_vec);
2927 else
2929 for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
2930 vec_oprnds->quick_push (vop);
2936 /* Get vectorized definitions from SLP_NODE that contains corresponding
2937 vectorized def-stmts. */
2939 static void
2940 vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
2942 tree vec_oprnd;
2943 gimple vec_def_stmt;
2944 unsigned int i;
2946 gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());
2948 FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
2950 gcc_assert (vec_def_stmt);
2951 vec_oprnd = gimple_get_lhs (vec_def_stmt);
2952 vec_oprnds->quick_push (vec_oprnd);
2957 /* Get vectorized definitions for SLP_NODE.
2958 If the scalar definitions are loop invariants or constants, collect them and
2959 call vect_get_constant_vectors() to create vector stmts.
2960 Otherwise, the def-stmts must already be vectorized and the vectorized stmts
2961 must be stored in the corresponding child of SLP_NODE, and we call
2962 vect_get_slp_vect_defs () to retrieve them. */
2964 void
2965 vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
2966 vec<vec<tree> > *vec_oprnds, int reduc_index)
2968 gimple first_stmt;
2969 int number_of_vects = 0, i;
2970 unsigned int child_index = 0;
2971 HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
2972 slp_tree child = NULL;
2973 vec<tree> vec_defs;
2974 tree oprnd;
2975 bool vectorized_defs;
2977 first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
2978 FOR_EACH_VEC_ELT (ops, i, oprnd)
2980 /* For each operand we check if it has vectorized definitions in a child
2981 node or we need to create them (for invariants and constants). We
2982 check if the LHS of the first stmt of the next child matches OPRND.
2983 If it does, we found the correct child. Otherwise, we call
2984 vect_get_constant_vectors (), and do not advance CHILD_INDEX, in order
2985 to check this child node again for the next operand. */
2986 vectorized_defs = false;
2987 if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
2989 child = SLP_TREE_CHILDREN (slp_node)[child_index];
2991 /* We have to check both pattern and original def, if available. */
2992 if (child)
2994 gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
2995 gimple related
2996 = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));
2998 if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
2999 || (related
3000 && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
3002 /* The number of vector defs is determined by the number of
3003 vector statements in the node from which we get those
3004 statements. */
3005 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
3006 vectorized_defs = true;
3007 child_index++;
3010 else
3011 child_index++;
3014 if (!vectorized_defs)
3016 if (i == 0)
3018 number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
3019 /* The number of vector stmts was calculated according to the LHS in
3020 vect_schedule_slp_instance (); fix it by replacing LHS with
3021 RHS, if necessary. See vect_get_smallest_scalar_type () for
3022 details. */
3023 vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
3024 &rhs_size_unit);
3025 if (rhs_size_unit != lhs_size_unit)
3027 number_of_vects *= rhs_size_unit;
3028 number_of_vects /= lhs_size_unit;
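/* Worked example (illustrative): for a char-to-int conversion,
   lhs_size_unit == 4 and rhs_size_unit == 1, so the LHS-based count is
   scaled by 1/4: four int result vectors only need one char operand
   vector of the same size in bytes.  */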
3033 /* Allocate memory for vectorized defs. */
3034 vec_defs = vNULL;
3035 vec_defs.create (number_of_vects);
3037 /* For reduction defs we call vect_get_constant_vectors (), since we are
3038 looking for initial loop invariant values. */
3039 if (vectorized_defs && reduc_index == -1)
3040 /* The defs are already vectorized. */
3041 vect_get_slp_vect_defs (child, &vec_defs);
3042 else
3043 /* Build vectors from scalar defs. */
3044 vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
3045 number_of_vects, reduc_index);
3047 vec_oprnds->quick_push (vec_defs);
3049 /* For reductions, we only need initial values. */
3050 if (reduc_index != -1)
3051 return;
3056 /* Create NCOPIES permutation statements using the vector mask MASK and two
3057 input vectors placed in DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX
3058 for the first copy, shifting by STRIDE elements of DR_CHAIN for every
3059 further copy.
3060 (STRIDE is the number of vectorized stmts for NODE divided by the number of
3061 copies.)
3062 VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE at
3063 which the created stmts must be stored. */
3065 static inline void
3066 vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
3067 tree mask, int first_vec_indx, int second_vec_indx,
3068 gimple_stmt_iterator *gsi, slp_tree node,
3069 tree vectype, vec<tree> dr_chain,
3070 int ncopies, int vect_stmts_counter)
3072 tree perm_dest;
3073 gimple perm_stmt = NULL;
3074 stmt_vec_info next_stmt_info;
3075 int i, stride;
3076 tree first_vec, second_vec, data_ref;
3078 stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
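/* Illustrative example (not in the original): with six vector stmts for
   NODE and ncopies == 2, stride == 3; copy 0 reads the dr_chain slots at
   FIRST_VEC_INDX and SECOND_VEC_INDX, copy 1 the slots three further on,
   and the generated stmts land at positions i * 3 + VECT_STMTS_COUNTER.  */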
3080 /* Initialize the vect stmts of NODE to properly insert the generated
3081 stmts later. */
3082 for (i = SLP_TREE_VEC_STMTS (node).length ();
3083 i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
3084 SLP_TREE_VEC_STMTS (node).quick_push (NULL);
3086 perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
3087 for (i = 0; i < ncopies; i++)
3089 first_vec = dr_chain[first_vec_indx];
3090 second_vec = dr_chain[second_vec_indx];
3092 /* Generate the permute statement. */
3093 perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
3094 first_vec, second_vec, mask);
3095 data_ref = make_ssa_name (perm_dest, perm_stmt);
3096 gimple_set_lhs (perm_stmt, data_ref);
3097 vect_finish_stmt_generation (stmt, perm_stmt, gsi);
3099 /* Store the vector statement in NODE. */
3100 SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;
3102 first_vec_indx += stride;
3103 second_vec_indx += stride;
3106 /* Mark the scalar stmt as vectorized. */
3107 next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
3108 STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
3112 /* Given FIRST_MASK_ELEMENT - the mask element in element representation,
3113 return in CURRENT_MASK_ELEMENT its equivalent in target specific
3114 representation. Check that the mask is valid and return FALSE if not.
3115 Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
3116 the next vector, i.e., the current first vector is not needed. */
3118 static bool
3119 vect_get_mask_element (gimple stmt, int first_mask_element, int m,
3120 int mask_nunits, bool only_one_vec, int index,
3121 unsigned char *mask, int *current_mask_element,
3122 bool *need_next_vector, int *number_of_mask_fixes,
3123 bool *mask_fixed, bool *needs_first_vector)
3125 int i;
3127 /* Convert to target specific representation. */
3128 *current_mask_element = first_mask_element + m;
3129 /* Adjust the value in case it's a mask for the second and third vectors. */
3130 *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
3132 if (*current_mask_element < 0)
3134 if (dump_enabled_p ())
3136 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3137 "permutation requires past vector ");
3138 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3139 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3141 return false;
3144 if (*current_mask_element < mask_nunits)
3145 *needs_first_vector = true;
3147 /* We have only one input vector to permute but the mask accesses values in
3148 the next vector as well. */
3149 if (only_one_vec && *current_mask_element >= mask_nunits)
3151 if (dump_enabled_p ())
3153 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3154 "permutation requires at least two vectors ");
3155 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3156 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3159 return false;
3162 /* The mask requires the next vector. */
3163 while (*current_mask_element >= mask_nunits * 2)
3165 if (*needs_first_vector || *mask_fixed)
3167 /* We either need the first vector too or have already moved to the
3168 next vector. In both cases, this permutation needs three
3169 vectors. */
3170 if (dump_enabled_p ())
3172 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3173 "permutation requires at "
3174 "least three vectors ");
3175 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3176 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3179 return false;
3182 /* We move to the next vector, dropping the first one and working with
3183 the second and the third - we need to adjust the values of the mask
3184 accordingly. */
3185 *current_mask_element -= mask_nunits * *number_of_mask_fixes;
3187 for (i = 0; i < index; i++)
3188 mask[i] -= mask_nunits * *number_of_mask_fixes;
3190 (*number_of_mask_fixes)++;
3191 *mask_fixed = true;
3194 *need_next_vector = *mask_fixed;
3196 /* This was the last element of this mask. Start a new one. */
3197 if (index == mask_nunits - 1)
3199 *number_of_mask_fixes = 1;
3200 *mask_fixed = false;
3201 *needs_first_vector = false;
3204 return true;
3208 /* Generate vector permute statements from a list of loads in DR_CHAIN.
3209 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
3210 permute statements for the SLP node NODE of the SLP instance
3211 SLP_NODE_INSTANCE. */
3213 bool
3214 vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
3215 gimple_stmt_iterator *gsi, int vf,
3216 slp_instance slp_node_instance, bool analyze_only)
3218 gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3219 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
3220 tree mask_element_type = NULL_TREE, mask_type;
3221 int i, j, k, nunits, vec_index = 0, scalar_index;
3222 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
3223 gimple next_scalar_stmt;
3224 int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
3225 int first_mask_element;
3226 int index, unroll_factor, current_mask_element, ncopies;
3227 unsigned char *mask;
3228 bool only_one_vec = false, need_next_vector = false;
3229 int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
3230 int number_of_mask_fixes = 1;
3231 bool mask_fixed = false;
3232 bool needs_first_vector = false;
3233 machine_mode mode;
3235 mode = TYPE_MODE (vectype);
3237 if (!can_vec_perm_p (mode, false, NULL))
3239 if (dump_enabled_p ())
3241 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3242 "no vect permute for ");
3243 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
3244 dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
3246 return false;
3249 /* The generic VEC_PERM_EXPR code always uses an integral type of the
3250 same size as the vector element being permuted. */
3251 mask_element_type = lang_hooks.types.type_for_mode
3252 (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
3253 mask_type = get_vectype_for_scalar_type (mask_element_type);
3254 nunits = TYPE_VECTOR_SUBPARTS (vectype);
3255 mask = XALLOCAVEC (unsigned char, nunits);
3256 unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3258 /* Compute the number of vector stmts to generate based only on the
3259 SLP_NODE_INSTANCE unrolling factor. */
3260 orig_vec_stmts_num = group_size *
3261 SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
3262 if (orig_vec_stmts_num == 1)
3263 only_one_vec = true;
3265 /* The number of copies is determined by the final vectorization factor
3266 relative to the SLP_NODE_INSTANCE unrolling factor. */
3267 ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
3269 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
3270 return false;
3272 /* Generate permutation masks for every NODE. Number of masks for each NODE
3273 is equal to GROUP_SIZE.
3274 E.g., we have a group of three nodes with three loads from the same
3275 location in each node, and the vector size is 4. I.e., we have an
3276 a0b0c0a1b1c1... sequence and we need to create the following vectors:
3277 for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
3278 for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
3281 The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
3282 The last mask is illegal since we assume two operands for the permute
3283 operation, and the mask element values can't be outside that range.
3284 Hence, the last mask must be converted into {2,5,5,5}.
3285 For the first two permutations we need the first and the second input
3286 vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
3287 we need the second and the third vectors: {b1,c1,a2,b2} and
3288 {c2,a3,b3,c3}. */
3291 scalar_index = 0;
3292 index = 0;
3293 vect_stmts_counter = 0;
3294 vec_index = 0;
3295 first_vec_index = vec_index++;
3296 if (only_one_vec)
3297 second_vec_index = first_vec_index;
3298 else
3299 second_vec_index = vec_index++;
3301 for (j = 0; j < unroll_factor; j++)
3303 for (k = 0; k < group_size; k++)
3305 i = SLP_TREE_LOAD_PERMUTATION (node)[k];
3306 first_mask_element = i + j * group_size;
3307 if (!vect_get_mask_element (stmt, first_mask_element, 0,
3308 nunits, only_one_vec, index,
3309 mask, &current_mask_element,
3310 &need_next_vector,
3311 &number_of_mask_fixes, &mask_fixed,
3312 &needs_first_vector))
3313 return false;
3314 gcc_assert (current_mask_element >= 0
3315 && current_mask_element < 2 * nunits);
3316 mask[index++] = current_mask_element;
3318 if (index == nunits)
3320 index = 0;
3321 if (!can_vec_perm_p (mode, false, mask))
3323 if (dump_enabled_p ())
3325 dump_printf_loc (MSG_MISSED_OPTIMIZATION,
3326 vect_location,
3327 "unsupported vect permute { ");
3328 for (i = 0; i < nunits; ++i)
3329 dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
3330 mask[i]);
3331 dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
3333 return false;
3336 if (!analyze_only)
3338 int l;
3339 tree mask_vec, *mask_elts;
3340 mask_elts = XALLOCAVEC (tree, nunits);
3341 for (l = 0; l < nunits; ++l)
3342 mask_elts[l] = build_int_cst (mask_element_type,
3343 mask[l]);
3344 mask_vec = build_vector (mask_type, mask_elts);
3346 if (need_next_vector)
3348 first_vec_index = second_vec_index;
3349 second_vec_index = vec_index;
3352 next_scalar_stmt
3353 = SLP_TREE_SCALAR_STMTS (node)[scalar_index++];
3355 vect_create_mask_and_perm (stmt, next_scalar_stmt,
3356 mask_vec, first_vec_index, second_vec_index,
3357 gsi, node, vectype, dr_chain,
3358 ncopies, vect_stmts_counter++);
3365 return true;
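/* A minimal standalone sketch, kept disabled and not part of the original
   file: it reproduces the raw mask sequence for the a's node from the
   worked example in the comment inside vect_transform_slp_perm_load above,
   {0,0,0,3} {3,3,6,6} {6,9,9,9}, before vect_get_mask_element re-bases the
   out-of-range last mask to {2,5,5,5}. The constants are assumptions taken
   from that example, not real vectorizer state.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const int group_size = 3;		/* three loads per scalar iteration  */
  const int nunits = 4;			/* four elements per vector  */
  const int unroll_factor = 4;		/* lcm (3, 4) / 3  */
  const int load_perm[3] = { 0, 0, 0 };	/* the a's node reads element 0  */
  int index = 0;

  for (int j = 0; j < unroll_factor; j++)
    for (int k = 0; k < group_size; k++)
      {
	/* This mirrors first_mask_element = i + j * group_size above.  */
	int element = load_perm[k] + j * group_size;
	printf (index == 0 ? "{%d" : ",%d", element);
	if (++index == nunits)
	  {
	    printf ("} ");
	    index = 0;
	  }
      }
  printf ("\n");	/* prints: {0,0,0,3} {3,3,6,6} {6,9,9,9}  */
  return 0;
}
#endif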
3370 /* Vectorize SLP instance tree in postorder. */
3372 static bool
3373 vect_schedule_slp_instance (slp_tree node, slp_instance instance,
3374 unsigned int vectorization_factor)
3376 gimple stmt;
3377 bool grouped_store, is_store;
3378 gimple_stmt_iterator si;
3379 stmt_vec_info stmt_info;
3380 unsigned int vec_stmts_size, nunits, group_size;
3381 tree vectype;
3382 int i;
3383 slp_tree child;
3385 if (!node)
3386 return false;
3388 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3389 vect_schedule_slp_instance (child, instance, vectorization_factor);
3391 stmt = SLP_TREE_SCALAR_STMTS (node)[0];
3392 stmt_info = vinfo_for_stmt (stmt);
3394 /* VECTYPE is the type of the destination. */
3395 vectype = STMT_VINFO_VECTYPE (stmt_info);
3396 nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
3397 group_size = SLP_INSTANCE_GROUP_SIZE (instance);
3399 /* For each SLP instance calculate the number of vector stmts to be created
3400 for the scalar stmts in each node of the SLP tree. The number of vector
3401 elements in one vector iteration is the number of scalar elements in
3402 one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
3403 size. */
3404 vec_stmts_size = (vectorization_factor * group_size) / nunits;
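/* Worked example (illustrative): vectorization_factor == 4,
   group_size == 2 and nunits == 4 give vec_stmts_size == 2: eight
   scalars from four unrolled iterations fill two four-element vectors.  */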
3406 if (!SLP_TREE_VEC_STMTS (node).exists ())
3408 SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
3409 SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
3412 if (dump_enabled_p ())
3414 dump_printf_loc (MSG_NOTE,vect_location,
3415 "------>vectorizing SLP node starting from: ");
3416 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3417 dump_printf (MSG_NOTE, "\n");
3420 /* Vectorized stmts go before the last scalar stmt, which is where
3421 all uses are ready. */
3422 si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));
3424 /* Mark the first element of the reduction chain as a reduction to properly
3425 transform the node. In the analysis phase only the last element of the
3426 chain is marked as a reduction. */
3427 if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
3428 && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
3430 STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
3431 STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
3434 /* Handle two-operation SLP nodes by vectorizing the group with
3435 both operations and then performing a merge. */
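/* Illustrative example (an assumption, not from the original sources): for
   a group { a0 + b0, a1 - b1, a2 + b2, a3 - b3 } with nunits == 4, the
   node is first vectorized as a full vector PLUS, then as a full vector
   MINUS, and the VEC_PERM_EXPR built below uses the mask { 0, 5, 2, 7 }
   to take lanes 0 and 2 from the PLUS result and lanes 1 and 3 from the
   MINUS result, which matches an addsub pattern.  */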
3436 if (SLP_TREE_TWO_OPERATORS (node))
3438 enum tree_code code0 = gimple_assign_rhs_code (stmt);
3439 enum tree_code ocode;
3440 gimple ostmt;
3441 unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
3442 bool allsame = true;
3443 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
3444 if (gimple_assign_rhs_code (ostmt) != code0)
3446 mask[i] = 1;
3447 allsame = false;
3448 ocode = gimple_assign_rhs_code (ostmt);
3450 else
3451 mask[i] = 0;
3452 if (!allsame)
3454 vec<gimple> v0;
3455 vec<gimple> v1;
3456 unsigned j;
3457 tree tmask = NULL_TREE;
3458 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3459 v0 = SLP_TREE_VEC_STMTS (node).copy ();
3460 SLP_TREE_VEC_STMTS (node).truncate (0);
3461 gimple_assign_set_rhs_code (stmt, ocode);
3462 vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3463 gimple_assign_set_rhs_code (stmt, code0);
3464 v1 = SLP_TREE_VEC_STMTS (node).copy ();
3465 SLP_TREE_VEC_STMTS (node).truncate (0);
3466 tree meltype = build_nonstandard_integer_type
3467 (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
3468 tree mvectype = get_same_sized_vectype (meltype, vectype);
3469 unsigned k = 0, l;
3470 for (j = 0; j < v0.length (); ++j)
3472 tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
3473 for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
3475 if (k >= group_size)
3476 k = 0;
3477 melts[l] = build_int_cst
3478 (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
3480 tmask = build_vector (mvectype, melts);
3482 /* ??? Not all targets support a VEC_PERM_EXPR with a
3483 constant mask that would translate to a vec_merge RTX
3484 (with their vec_perm_const_ok). We can either not
3485 vectorize in that case or let veclower do its job.
3486 Unfortunately that isn't too great and at least for
3487 plus/minus we'd eventually like to match targets'
3488 vector addsub instructions. */
3489 gimple vstmt;
3490 vstmt = gimple_build_assign (make_ssa_name (vectype),
3491 VEC_PERM_EXPR,
3492 gimple_assign_lhs (v0[j]),
3493 gimple_assign_lhs (v1[j]), tmask);
3494 vect_finish_stmt_generation (stmt, vstmt, &si);
3495 SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
3497 v0.release ();
3498 v1.release ();
3499 return false;
3502 is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
3503 return is_store;
3506 /* Replace scalar calls from SLP node NODE with assignments of zero to
3507 their lhs. For loop vectorization this is done in vectorizable_call, but
3508 for SLP it needs to be deferred until the end of vect_schedule_slp,
3509 because multiple SLP instances may refer to the same scalar stmt. */
3511 static void
3512 vect_remove_slp_scalar_calls (slp_tree node)
3514 gimple stmt, new_stmt;
3515 gimple_stmt_iterator gsi;
3516 int i;
3517 slp_tree child;
3518 tree lhs;
3519 stmt_vec_info stmt_info;
3521 if (!node)
3522 return;
3524 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
3525 vect_remove_slp_scalar_calls (child);
3527 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
3529 if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
3530 continue;
3531 stmt_info = vinfo_for_stmt (stmt);
3532 if (stmt_info == NULL
3533 || is_pattern_stmt_p (stmt_info)
3534 || !PURE_SLP_STMT (stmt_info))
3535 continue;
3536 lhs = gimple_call_lhs (stmt);
3537 new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
3538 set_vinfo_for_stmt (new_stmt, stmt_info);
3539 set_vinfo_for_stmt (stmt, NULL);
3540 STMT_VINFO_STMT (stmt_info) = new_stmt;
3541 gsi = gsi_for_stmt (stmt);
3542 gsi_replace (&gsi, new_stmt, false);
3543 SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
3547 /* Generate vector code for all SLP instances in the loop/basic block. */
3549 bool
3550 vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
3552 vec<slp_instance> slp_instances;
3553 slp_instance instance;
3554 unsigned int i, vf;
3555 bool is_store = false;
3557 if (loop_vinfo)
3559 slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
3560 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
3562 else
3564 slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
3565 vf = 1;
3568 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3570 /* Schedule the tree of INSTANCE. */
3571 is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
3572 instance, vf);
3573 if (dump_enabled_p ())
3574 dump_printf_loc (MSG_NOTE, vect_location,
3575 "vectorizing stmts using SLP.\n");
3578 FOR_EACH_VEC_ELT (slp_instances, i, instance)
3580 slp_tree root = SLP_INSTANCE_TREE (instance);
3581 gimple store;
3582 unsigned int j;
3583 gimple_stmt_iterator gsi;
3585 /* Remove scalar call stmts. Do not do this for basic-block
3586 vectorization as not all uses may be vectorized.
3587 ??? Why should this be necessary? DCE should be able to
3588 remove the stmts itself.
3589 ??? For BB vectorization we can as well remove scalar
3590 stmts starting from the SLP tree root if they have no
3591 uses. */
3592 if (loop_vinfo)
3593 vect_remove_slp_scalar_calls (root);
3595 for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
3596 && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
3598 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
3599 break;
3601 if (is_pattern_stmt_p (vinfo_for_stmt (store)))
3602 store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
3603 /* Free the attached stmt_vec_info and remove the stmt. */
3604 gsi = gsi_for_stmt (store);
3605 unlink_stmt_vdef (store);
3606 gsi_remove (&gsi, true);
3607 release_defs (store);
3608 free_stmt_vec_info (store);
3612 return is_store;
3616 /* Vectorize the basic block. */
3618 void
3619 vect_slp_transform_bb (basic_block bb)
3621 bb_vec_info bb_vinfo = vec_info_for_bb (bb);
3622 gimple_stmt_iterator si;
3624 gcc_assert (bb_vinfo);
3626 if (dump_enabled_p ())
3627 dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");
3629 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
3631 gimple stmt = gsi_stmt (si);
3632 stmt_vec_info stmt_info;
3634 if (dump_enabled_p ())
3636 dump_printf_loc (MSG_NOTE, vect_location,
3637 "------>SLPing statement: ");
3638 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
3639 dump_printf (MSG_NOTE, "\n");
3642 stmt_info = vinfo_for_stmt (stmt);
3643 gcc_assert (stmt_info);
3645 /* Schedule all the SLP instances when the first SLP stmt is reached. */
3646 if (STMT_SLP_TYPE (stmt_info))
3648 vect_schedule_slp (NULL, bb_vinfo);
3649 break;
3653 if (dump_enabled_p ())
3654 dump_printf_loc (MSG_NOTE, vect_location,
3655 "BASIC BLOCK VECTORIZED\n");
3657 destroy_bb_vec_info (bb_vinfo);