/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2013 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location on success and UNKNOWN_LOC otherwise.  */

LOC
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOC;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree ((slp_tree) child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOAD_PERMUTATION (instance).release ();
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);

  return node;
}
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_def_type = NULL_TREE;
      oprnd_info->first_const_oprnd = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  */

static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             slp_tree slp_node, gimple stmt,
                             int ncopies_for_cost, bool first,
                             vec<slp_oprnd_info> *oprnds_info,
                             stmt_vector_for_cost *prologue_cost_vec,
                             stmt_vector_for_cost *body_cost_vec)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def, def_op0 = NULL_TREE;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  enum vect_def_type dt_op0 = vect_uninitialized_def;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree lhs = gimple_get_lhs (stmt);
  struct loop *loop = NULL;
  enum tree_code rhs_code;
  bool different_types = false;
  bool pattern = false;
  slp_oprnd_info oprnd_info, oprnd0_info, oprnd1_info;
  int op_idx = 1;
  tree compare_rhs = NULL_TREE;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        number_of_oprnds++;
    }
  else
    return false;

  for (i = 0; i < number_of_oprnds; i++)
    {
      if (compare_rhs)
        {
          oprnd = compare_rhs;
          compare_rhs = NULL_TREE;
        }
      else
        oprnd = gimple_op (stmt, op_idx++);

      oprnd_info = (*oprnds_info)[i];

      if (COMPARISON_CLASS_P (oprnd))
        {
          compare_rhs = TREE_OPERAND (oprnd, 1);
          oprnd = TREE_OPERAND (oprnd, 0);
        }

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
            }

          return false;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                }

              return false;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.");
              return false;
            }

          switch (gimple_code (def_stmt))
            {
              case GIMPLE_PHI:
                def = gimple_phi_result (def_stmt);
                break;

              case GIMPLE_ASSIGN:
                def = gimple_assign_lhs (def_stmt);
                break;

              default:
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported defining stmt: ");
                return false;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          if (def)
            {
              oprnd_info->first_def_type = TREE_TYPE (def);
              oprnd_info->first_const_oprnd = NULL_TREE;
            }
          else
            {
              oprnd_info->first_def_type = NULL_TREE;
              oprnd_info->first_const_oprnd = oprnd;
            }

          if (i == 0)
            {
              def_op0 = def;
              dt_op0 = dt;
              /* Analyze costs (for the first stmt of the group only).  */
              if (REFERENCE_CLASS_P (lhs))
                /* Store.  */
                vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                                       dt, slp_node, prologue_cost_vec,
                                       body_cost_vec);
              else
                {
                  enum vect_def_type dts[2];
                  dts[0] = dt;
                  dts[1] = vect_uninitialized_def;
                  /* Not memory operation (we don't call this function for
                     loads).  */
                  vect_model_simple_cost (stmt_info, ncopies_for_cost, dts,
                                          prologue_cost_vec, body_cost_vec);
                }
            }
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def))
               || (oprnd_info->first_def_type != NULL_TREE
                   && def
                   && !types_compatible_p (oprnd_info->first_def_type,
                                           TREE_TYPE (def))))
              || (!def
                  && !types_compatible_p (TREE_TYPE (oprnd_info->first_const_oprnd),
                                          TREE_TYPE (oprnd)))
              || different_types)
            {
              if (number_of_oprnds != 2)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different types ");

                  return false;
                }

              /* Try to swap operands in case of binary operation.  */
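              /* Illustration (invented statements, not from the original
                 sources): for the group
                     x0 = a0 + b0;
                     x1 = b1 + a1;
                 swapping the operands of the second statement lets both
                 operand nodes collect matching defs, {a0, a1} and
                 {b0, b1}.  */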
              if (i == 0)
                different_types = true;
              else
                {
                  oprnd0_info = (*oprnds_info)[0];
                  if (is_gimple_assign (stmt)
                      && (rhs_code = gimple_assign_rhs_code (stmt))
                      && TREE_CODE_CLASS (rhs_code) == tcc_binary
                      && commutative_tree_code (rhs_code)
                      && oprnd0_info->first_dt == dt
                      && oprnd_info->first_dt == dt_op0
                      && def_op0 && def
                      && !(oprnd0_info->first_def_type
                           && !types_compatible_p (oprnd0_info->first_def_type,
                                                   TREE_TYPE (def)))
                      && !(oprnd_info->first_def_type
                           && !types_compatible_p (oprnd_info->first_def_type,
                                                   TREE_TYPE (def_op0))))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "Swapping operands of ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }

                      swap_tree_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                          gimple_assign_rhs2_ptr (stmt));
                    }
                  else
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: different types ");

                      return false;
                    }
                }
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          if (different_types)
            {
              oprnd0_info = (*oprnds_info)[0];
              oprnd1_info = (*oprnds_info)[1];
              if (i == 0)
                oprnd1_info->def_stmts.quick_push (def_stmt);
              else
                oprnd0_info->def_stmts.quick_push (def_stmt);
            }
          else
            oprnd_info->def_stmts.quick_push (def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
            }

          return false;
        }
    }

  return true;
}
/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise, return
   TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size, int *outside_cost,
                     int ncopies_for_cost, unsigned int *max_nunits,
                     vec<int> *load_permutation,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor, bool *loads_permuted,
                     stmt_vector_for_cost *prologue_cost_vec,
                     stmt_vector_for_cost *body_cost_vec)
{
  unsigned int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (*node);
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool stop_recursion = false, need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  unsigned int ncopies;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  bool permutation = false;
  unsigned int load_place;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  vec<slp_oprnd_info> oprnds_info;
  unsigned int nops;
  slp_oprnd_info oprnd_info;
  tree cond;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  oprnds_info = vect_create_oprnd_info (nops, group_size);

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      ncopies = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);

      if (is_gimple_call (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (stmt)
              || gimple_call_tail_p (stmt)
              || gimple_call_noreturn_p (stmt)
              || !gimple_call_nothrow_p (stmt)
              || gimple_call_chain (stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
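          /* Illustration (invented statements): for
                 x0 = a0 << s;
                 x1 = a1 << s;
             a vector/scalar shift needs the same scalar S in every stmt;
             the NEED_SAME_OPRNDS check below enforces this.  */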
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.");
                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.");
                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node,
                                                stmt, ncopies_for_cost,
                                                (i == 0), &oprnds_info,
                                                prologue_cost_vec,
                                                body_cost_vec))
                {
                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }
            }
          else
            {
              /* Load.  */
              /* FORNOW: Check that there is no gap between the loads.  */
              if ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                   && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              if (loop_vinfo
                  && GROUP_SIZE (vinfo_for_stmt (stmt)) > ncopies * group_size)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  The only exception is complex
                     numbers.  */
                  if (prev_first_load != first_load
                      && rhs_code != REALPART_EXPR
                      && rhs_code != IMAGPART_EXPR)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                        }

                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                        }

                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }

                  /* Analyze costs (for the first stmt in the group).  */
                  vect_model_load_cost (vinfo_for_stmt (stmt),
                                        ncopies_for_cost, false, *node,
                                        prologue_cost_vec, body_cost_vec);
                }

              /* Store the place of this load in the interleaving chain.  In
                 case that permutation is needed we later decide if a specific
                 permutation is supported.  */
              load_place = vect_get_place_in_interleaving_chain (stmt,
                                                                 first_load);
              if (load_place != i)
                permutation = true;

              load_permutation->safe_push (load_place);
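              /* Illustration (invented access pattern): if the chain is
                 a[0..3] and this node loads {a[2], a[3], a[0], a[1]}, the
                 pushed places are 2, 3, 0, 1.  */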
              /* We stop the tree when we reach a group of loads.  */
              stop_recursion = true;
              continue;
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              /* FORNOW: Non-grouped loads are not supported.  */
              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }
            }

          /* Find the def-stmts.  */
          if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node, stmt,
                                            ncopies_for_cost, (i == 0),
                                            &oprnds_info, prologue_cost_vec,
                                            body_cost_vec))
            {
              vect_free_oprnd_info (oprnds_info);
              return false;
            }
        }
    }

  /* Grouped loads were reached - stop the recursion.  */
  if (stop_recursion)
    {
      loads->safe_push (*node);
      if (permutation)
        {
          gimple first_stmt = stmts[0];
          *loads_permuted = true;
          (void) record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                   vinfo_for_stmt (first_stmt), 0, vect_body);
        }
      else
        {
          /* We don't check here complex numbers chains, so we set
             LOADS_PERMUTED for further check in
             vect_supported_load_permutation_p.  */
          if (rhs_code == REALPART_EXPR || rhs_code == IMAGPART_EXPR)
            *loads_permuted = true;
        }

      vect_free_oprnd_info (oprnds_info);
      return true;
    }

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child
          || !vect_build_slp_tree (loop_vinfo, bb_vinfo, &child, group_size,
                                   outside_cost, ncopies_for_cost,
                                   max_nunits, load_permutation, loads,
                                   vectorization_factor, loads_permuted,
                                   prologue_cost_vec, body_cost_vec))
        {
          if (child)
            oprnd_info->def_stmts = vNULL;
          vect_free_slp_tree (child);
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      oprnd_info->def_stmts.create (0);
      SLP_TREE_CHILDREN (*node).quick_push (child);
    }

  vect_free_oprnd_info (oprnds_info);
  return true;
}
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_void_p child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, (slp_tree) child);
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts ((slp_tree) child, mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant ((slp_tree) child);
}
/* Check if the permutation required by the SLP INSTANCE is supported.
   Reorganize the SLP nodes stored in SLP_INSTANCE_LOADS if needed.  */

static bool
vect_supported_slp_permutation_p (slp_instance instance)
{
  slp_tree node = SLP_INSTANCE_LOADS (instance)[0];
  gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  gimple first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
  vec<slp_tree> sorted_loads = vNULL;
  int index;
  slp_tree *tmp_loads = NULL;
  int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j;
  slp_tree load;

  /* FORNOW: The only supported loads permutation is loads from the same
     location in all the loads in the node, when the data-refs in
     nodes of LOADS constitute an interleaving chain.
     Sort the nodes according to the order of accesses in the chain.  */
  tmp_loads = (slp_tree *) xmalloc (sizeof (slp_tree) * group_size);
  for (i = 0, j = 0;
       SLP_INSTANCE_LOAD_PERMUTATION (instance).iterate (i, &index)
       && SLP_INSTANCE_LOADS (instance).iterate (j, &load);
       i += group_size, j++)
    {
      gimple scalar_stmt = SLP_TREE_SCALAR_STMTS (load)[0];
      /* Check that the loads are all in the same interleaving chain.  */
      if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (scalar_stmt)) != first_load)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data "
                               "permutation ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                scalar_stmt, 0);
            }

          free (tmp_loads);
          return false;
        }

      tmp_loads[index] = load;
    }

  sorted_loads.create (group_size);
  for (i = 0; i < group_size; i++)
    sorted_loads.safe_push (tmp_loads[i]);

  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_LOADS (instance) = sorted_loads;
  free (tmp_loads);

  if (!vect_transform_slp_perm_load (stmt, vNULL, NULL,
                                     SLP_INSTANCE_UNROLLING_FACTOR (instance),
                                     instance, true))
    return false;

  return true;
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<int> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int index, i;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts ((slp_tree) child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);

  for (i = 0; i < group_size; i++)
    tmp_stmts.safe_push (NULL);
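  /* Statement I moves to slot PERMUTATION[I]; e.g., with GROUP_SIZE 4 and
     permutation {1, 0, 3, 2} (an invented example) the statements end up
     swapped pairwise.  */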
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      index = permutation[i];
      tmp_stmts[index] = stmt;
    }

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
/* Check if the required load permutation is supported.
   LOAD_PERMUTATION contains a list of indices of the loads.
   In SLP this permutation is relative to the order of grouped stores that are
   the base of the SLP instance.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
                                   vec<int> load_permutation)
{
  int i = 0, j, prev = -1, next, k, number_of_groups;
  bool supported, bad_permutation = false;
  sbitmap load_index;
  slp_tree node, other_complex_node;
  gimple stmt, first = NULL, other_node_first, load, next_load, first_load;
  unsigned complex_numbers = 0;
  struct data_reference *dr;
  bb_vec_info bb_vinfo;

  /* FORNOW: permutations are only supported in SLP.  */
  if (!slp_instn)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (load_permutation, i, next)
        dump_printf (MSG_NOTE, "%d ", next);
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
        return false;

      stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      if (is_gimple_assign (stmt)
          && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
              || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR))
        complex_numbers++;
    }

  /* Complex operands can be swapped as following:
      real_c = real_b + real_a;
      imag_c = imag_a + imag_b;
     i.e., we have {real_b, imag_a} and {real_a, imag_b} instead of
     {real_a, imag_a} and {real_b, imag_b}.  We check here that if interleaving
     chains are mixed, they match the above pattern.  */
  if (complex_numbers)
    {
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, stmt)
            {
              if (j == 0)
                first = stmt;
              else
                {
                  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != first)
                    {
                      if (complex_numbers != 2)
                        return false;

                      if (i == 0)
                        k = 1;
                      else
                        k = 0;

                      other_complex_node = SLP_INSTANCE_LOADS (slp_instn)[k];
                      other_node_first =
                                SLP_TREE_SCALAR_STMTS (other_complex_node)[0];

                      if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
                          != other_node_first)
                        return false;
                    }
                }
            }
        }
    }

  /* We checked that this case is OK, so there is no need to proceed with
     permutation tests.  */
  if (complex_numbers == 2
      && SLP_INSTANCE_LOADS (slp_instn).length () == 2)
    {
      SLP_INSTANCE_LOADS (slp_instn).release ();
      SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
      return true;
    }

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  /* LOAD_PERMUTATION is a list of indices of all the loads of the SLP
     instance, not all the loads belong to the same node or interleaving
     group.  Hence, we need to divide them into groups according to
     GROUP_SIZE.  */
  number_of_groups = load_permutation.length () / group_size;

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      int first_group_load_index;

      /* Compare all the permutation sequences to the first one.  */
      for (i = 1; i < number_of_groups; i++)
        {
          k = 0;
          for (j = i * group_size; j < i * group_size + group_size; j++)
            {
              next = load_permutation[j];
              first_group_load_index = load_permutation[k];

              if (next != first_group_load_index)
                {
                  bad_permutation = true;
                  break;
                }

              k++;
            }

          if (bad_permutation)
            break;
        }

      if (!bad_permutation)
        {
          /* Check that the loads in the first sequence are different and there
             are no gaps between them.  */
          load_index = sbitmap_alloc (group_size);
          bitmap_clear (load_index);
          for (k = 0; k < group_size; k++)
            {
              first_group_load_index = load_permutation[k];
              if (bitmap_bit_p (load_index, first_group_load_index))
                {
                  bad_permutation = true;
                  break;
                }

              bitmap_set_bit (load_index, first_group_load_index);
            }

          if (!bad_permutation)
            for (k = 0; k < group_size; k++)
              if (!bitmap_bit_p (load_index, k))
                {
                  bad_permutation = true;
                  break;
                }

          sbitmap_free (load_index);
        }

      if (!bad_permutation)
        {
          /* This permutation is valid for reduction.  Since the order of the
             statements in the nodes is not important unless they are memory
             accesses, we can rearrange the statements in all the nodes
             according to the order of the loads.  */
          vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                    load_permutation);
          SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
          return true;
        }
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  bb_vinfo = STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt));
  bad_permutation = false;
  /* Check that for every node in the instance the loads form a subchain.  */
  if (bb_vinfo)
    {
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          next_load = NULL;
          first_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (!first_load)
                first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (load));
              else if (first_load
                       != GROUP_FIRST_ELEMENT (vinfo_for_stmt (load)))
                {
                  bad_permutation = true;
                  break;
                }

              if (j != 0 && next_load != load)
                {
                  bad_permutation = true;
                  break;
                }

              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }

          if (bad_permutation)
            break;
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.  */
      if (!bad_permutation)
        {
          FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
            {
              first_load = SLP_TREE_SCALAR_STMTS (node)[0];
              if (first_load
                  != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
                {
                  dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
                  if (vect_supportable_dr_alignment (dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "unsupported unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            first_load, 0);
                        }
                      bad_permutation = true;
                      break;
                    }
                }
            }

          if (!bad_permutation)
            {
              SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
              return true;
            }
        }
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
  if (load_permutation.length ()
      != (unsigned int) (group_size * group_size))
    return false;

  supported = true;
  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  for (j = 0; j < group_size; j++)
    {
      for (i = j * group_size, k = 0;
           load_permutation.iterate (i, &next) && k < group_size;
           i++, k++)
        {
          if (i != j * group_size && next != prev)
            {
              supported = false;
              break;
            }

          prev = next;
        }

      if (bitmap_bit_p (load_index, prev))
        {
          supported = false;
          break;
        }

      bitmap_set_bit (load_index, prev);
    }

  for (j = 0; j < group_size; j++)
    if (!bitmap_bit_p (load_index, j))
      {
        sbitmap_free (load_index);
        return false;
      }

  sbitmap_free (load_index);

  if (supported && i == group_size * group_size
      && vect_supported_slp_permutation_p (slp_instn))
    return true;

  return false;
}
/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}
/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int outside_cost = 0, ncopies_for_cost, i;
  unsigned int max_nunits = 0;
  vec<int> load_permutation;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  bool loads_permuted = false;
  vec<gimple> scalar_stmts;
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  stmt_info_for_cost *si;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
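  /* For example (invented numbers): with NUNITS 4 and GROUP_SIZE 6,
     least_common_multiple (4, 6) is 12, so the group has to be unrolled
     twice (12 / 6) before it fills whole vectors.  */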
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  ncopies_for_cost = unrolling_factor * group_size / nunits;

  load_permutation.create (group_size * group_size);
  loads.create (group_size);
  prologue_cost_vec.create (10);
  body_cost_vec.create (10);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &outside_cost, ncopies_for_cost,
                           &max_nunits, &load_permutation, &loads,
                           vectorization_factor, &loads_permuted,
                           &prologue_cost_vec, &body_cost_vec))
    {
      void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                    : BB_VINFO_TARGET_COST_DATA (bb_vinfo));

      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP");
          vect_free_slp_tree (node);
          body_cost_vec.release ();
          prologue_cost_vec.release ();
          load_permutation.release ();
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = body_cost_vec;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;
      SLP_INSTANCE_LOAD_PERMUTATION (new_instance) = load_permutation;

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance, group_size,
                                                  load_permutation))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_slp_instance (new_instance);
              prologue_cost_vec.release ();
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }
      else
        SLP_INSTANCE_LOAD_PERMUTATION (new_instance).release ();

      /* Record the prologue costs, which were delayed until we were
         sure that SLP was successful.  Unlike the body costs, we know
         the final values now regardless of the loop vectorization factor.  */
      FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_prologue);
        }

      prologue_cost_vec.release ();

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }
  else
    {
      body_cost_vec.release ();
      prologue_cost_vec.release ();
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  load_permutation.release ();
  loads.release ();

  return false;
}
/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vNULL;
  vec<gimple> reduc_chains = vNULL;
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0]))
    ok = true;

  return true;
}
/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ===");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}
/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (node);
  gimple stmt = stmts[0];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  slp_void_p child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = NULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  basic_block bb = NULL;

  if (!node)
    return;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
  else
    bb = BB_VINFO_BB (bb_vinfo);
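  /* A PURE_SLP stmt whose SSA result is also used by a non-SLP stmt inside
     the loop (or block) must additionally be vectorized by the loop-based
     vectorizer, so it is marked as hybrid below.  */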
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
        && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
        if (gimple_bb (use_stmt)
            && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                || bb == gimple_bb (use_stmt))
            && (stmt_vinfo = vinfo_for_stmt (use_stmt))
            && !STMT_SLP_TYPE (stmt_vinfo)
            && (STMT_VINFO_RELEVANT (stmt_vinfo)
                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo)))
            && !(gimple_code (use_stmt) == GIMPLE_PHI
                 && STMT_VINFO_DEF_TYPE (stmt_vinfo)
                    == vect_reduction_def))
          vect_mark_slp_stmts (node, hybrid, i);

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_detect_hybrid_slp_stmts ((slp_tree) child);
}
/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ===");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}
/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}
/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  free_data_refs (BB_VINFO_DATAREFS (bb_vinfo));
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}
/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_void_p child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (bb_vinfo, (slp_tree) child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}
/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);
        }
      else
        i++;
    }

  if (!slp_instances.length ())
    return false;

  return true;
}
/* Check if vectorization of the basic block is profitable.  */

static bool
vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i, j;
  unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
  unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
  unsigned int stmt_cost;
  gimple stmt;
  gimple_stmt_iterator si;
  basic_block bb = BB_VINFO_BB (bb_vinfo);
  void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
  stmt_vec_info stmt_info = NULL;
  stmt_vector_for_cost body_cost_vec;
  stmt_info_for_cost *ci;

  /* Calculate vector costs.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);

      FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
        {
          stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
          (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
                                stmt_info, ci->misalign, vect_body);
        }
    }

  /* Calculate scalar cost.  */
  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      stmt_info = vinfo_for_stmt (stmt);

      if (!stmt_info || !STMT_VINFO_VECTORIZABLE (stmt_info)
          || !PURE_SLP_STMT (stmt_info))
        continue;

      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = vect_get_stmt_cost (scalar_load);
          else
            stmt_cost = vect_get_stmt_cost (scalar_store);
        }
      else
        stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;
    }

  /* Complete the target-specific cost calculation.  */
  finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of basic block cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n", vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n", vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar cost of basic block: %d", scalar_cost);
    }

  /* Vectorization is profitable if its cost is less than the cost of scalar
     version.  */
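  /* For example (invented costs): with scalar_cost 20, vec_inside_cost 8
     and vec_outside_cost 6, 8 + 6 < 20 holds and vectorization is
     considered profitable.  */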
  if (vec_outside_cost + vec_inside_cost >= scalar_cost)
    return false;

  return true;
}
/* Check if the basic block can be vectorized.  */

static bb_vec_info
vect_slp_analyze_bb_1 (basic_block bb)
{
  bb_vec_info bb_vinfo;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  int i;
  int min_vf = 2;

  bb_vinfo = new_bb_vec_info (bb);
  if (!bb_vinfo)
    return NULL;

  if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data-ref in basic "
                         "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: not enough data-refs in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data access in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  vect_pattern_recog (NULL, bb_vinfo);

  if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data dependence "
                         "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad data alignment in basic "
                         "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: failed to find SLP opportunities "
                         "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

  /* Mark all the statements that we want to vectorize as pure SLP and
     relevant.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
    }

  if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported alignment in basic "
                         "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_slp_analyze_operations (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad operation in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Cost model: check if the vectorization is worthwhile.  */
  if (flag_vect_cost_model
      && !vect_bb_vectorization_profitable_p (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization is not "
                         "profitable.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Basic block will be vectorized using SLP\n");

  return bb_vinfo;
}
bb_vec_info
vect_slp_analyze_bb (basic_block bb)
{
  bb_vec_info bb_vinfo;
  int insns = 0;
  gimple_stmt_iterator gsi;
  unsigned int vector_sizes;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt)
          && !gimple_nop_p (stmt)
          && gimple_code (stmt) != GIMPLE_LABEL)
        insns++;
    }

  if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: too many instructions in "
                         "basic block.\n");

      return NULL;
    }

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  while (1)
    {
      bb_vinfo = vect_slp_analyze_bb_1 (bb);
      if (bb_vinfo)
        return bb_vinfo;

      destroy_bb_vec_info (bb_vinfo);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "***** Re-trying analysis with "
                         "vector size %d\n", current_vector_size);
    }
}

/* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
   the number of created vector stmts depends on the unrolling factor).
   However, the actual number of vector stmts for every SLP node depends on
   VF which is set later in vect_analyze_operations ().  Hence, SLP costs
   should be updated.  In this function we assume that the inside costs
   calculated in vect_model_xxx_cost are linear in ncopies.  */

void
vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
{
  unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  stmt_vector_for_cost body_cost_vec;
  stmt_info_for_cost *si;
  void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_update_slp_costs_according_to_vf ===\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* We assume that costs are linear in ncopies.  */
      int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
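      /* E.g., with VF == 8 and an instance unrolling factor of 2,
	 NCOPIES is 4 and every cost entry recorded for the instance
	 body below is scaled by 4.  */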

      /* Record the instance's instructions in the target cost model.
	 This was delayed until here because the count of instructions
	 isn't known beforehand.  */
      body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);

      FOR_EACH_VEC_ELT (body_cost_vec, j, si)
	(void) add_stmt_cost (data, si->count * ncopies, si->kind,
			      vinfo_for_stmt (si->stmt), si->misalign,
			      vect_body);
    }
}

/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
   scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

static void
vect_get_constant_vectors (tree op, slp_tree slp_node,
			   vec<tree> *vec_oprnds,
			   unsigned int op_num, unsigned int number_of_vectors,
			   int reduc_index)
{
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned nunits;
  tree vec_cst;
  tree *elts;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;
  enum tree_code code = gimple_expr_code (stmt);
  gimple def_stmt;
  struct loop *loop;
  gimple_seq ctor_seq = NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && reduc_index != -1)
    {
      op_num = reduc_index - 1;
      op = gimple_op (stmt, reduc_index);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
	 we need either neutral operands or the original operands.  See
	 get_initial_def_for_reduction() for details.  */
      switch (code)
	{
	  case WIDEN_SUM_EXPR:
	  case DOT_PROD_EXPR:
	  case PLUS_EXPR:
	  case MINUS_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	     if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
	       neutral_op = build_real (TREE_TYPE (op), dconst0);
	     else
	       neutral_op = build_int_cst (TREE_TYPE (op), 0);

	     break;

	  case MULT_EXPR:
	     if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
	       neutral_op = build_real (TREE_TYPE (op), dconst1);
	     else
	       neutral_op = build_int_cst (TREE_TYPE (op), 1);

	     break;

	  case BIT_AND_EXPR:
	    neutral_op = build_int_cst (TREE_TYPE (op), -1);
	    break;

	  case MAX_EXPR:
	  case MIN_EXPR:
	    def_stmt = SSA_NAME_DEF_STMT (op);
	    loop = (gimple_bb (stmt))->loop_father;
	    neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
						loop_preheader_edge (loop));
	    break;

	  default:
	    neutral_op = NULL;
	}
    }

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      is_store = true;
      op = gimple_assign_rhs1 (stmt);
    }
  else
    is_store = false;

  gcc_assert (op);

  if (CONSTANT_CLASS_P (op))
    constant_p = true;
  else
    constant_p = false;

  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  gcc_assert (vector_type);
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = least_common_multiple (nunits, group_size) / group_size;
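  /* For the two examples above: least_common_multiple (4, 2) / 2 == 2
     copies of {s1, s2}, and least_common_multiple (4, 8) / 8 == 1 copy
     of s1...s8, spread over two vectors.  */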

  number_of_places_left_in_vector = nunits;
  elts = XALLOCAVEC (tree, nunits);
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
	{
	  if (is_store)
	    op = gimple_assign_rhs1 (stmt);
	  else
	    {
	      switch (code)
		{
		  case COND_EXPR:
		    if (op_num == 0 || op_num == 1)
		      {
			tree cond = gimple_assign_rhs1 (stmt);
			op = TREE_OPERAND (cond, op_num);
		      }
		    else
		      {
			if (op_num == 2)
			  op = gimple_assign_rhs2 (stmt);
			else
			  op = gimple_assign_rhs3 (stmt);
		      }
		    break;

		  case CALL_EXPR:
		    op = gimple_call_arg (stmt, op_num);
		    break;

		  case LSHIFT_EXPR:
		  case RSHIFT_EXPR:
		  case LROTATE_EXPR:
		  case RROTATE_EXPR:
		    op = gimple_op (stmt, op_num + 1);
		    /* Unlike the other binary operators, shifts/rotates have
		       the shift count being int, instead of the same type as
		       the lhs, so make sure the scalar is the right type if
		       we are dealing with vectors of
		       long long/long/short/char.  */
		    if (op_num == 1 && constant_p)
		      op = fold_convert (TREE_TYPE (vector_type), op);
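		    /* E.g., a constant shift count of type int is
		       converted here to the vector element type (say,
		       long long for a vector of long long), so the
		       vector built below has elements of one type.  */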
		    break;

		  default:
		    op = gimple_op (stmt, op_num + 1);
		    break;
		}
	    }

	  if (reduc_index != -1)
	    {
	      loop = (gimple_bb (stmt))->loop_father;
	      def_stmt = SSA_NAME_DEF_STMT (op);

	      gcc_assert (loop);

	      /* Get the def before the loop.  In reduction chain we have only
		 one initial value.  */
	      if ((j != (number_of_copies - 1)
		   || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
		       && i != 0))
		  && neutral_op)
		op = neutral_op;
	      else
		op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
					    loop_preheader_edge (loop));
	    }

	  /* Create 'vect_ = {op0,op1,...,opn}'.  */
	  number_of_places_left_in_vector--;
	  if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
	    {
	      if (CONSTANT_CLASS_P (op))
		{
		  op = fold_unary (VIEW_CONVERT_EXPR,
				   TREE_TYPE (vector_type), op);
		  gcc_assert (op && CONSTANT_CLASS_P (op));
		}
	      else
		{
		  tree new_temp
		    = make_ssa_name (TREE_TYPE (vector_type), NULL);
		  gimple init_stmt;
		  op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
			       op);
		  init_stmt
		    = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR,
						    new_temp, op, NULL_TREE);
		  gimple_seq_add_stmt (&ctor_seq, init_stmt);
		  op = new_temp;
		}
	    }
	  elts[number_of_places_left_in_vector] = op;

	  if (number_of_places_left_in_vector == 0)
	    {
	      number_of_places_left_in_vector = nunits;

	      if (constant_p)
		vec_cst = build_vector (vector_type, elts);
	      else
		{
		  vec<constructor_elt, va_gc> *v;
		  unsigned k;
		  vec_alloc (v, nunits);
		  for (k = 0; k < nunits; ++k)
		    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
		  vec_cst = build_constructor (vector_type, v);
		}
	      voprnds.quick_push (vect_init_vector (stmt, vec_cst,
						    vector_type, NULL));
	      if (ctor_seq != NULL)
		{
		  gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
		  gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
		  gsi_insert_seq_before_without_update (&gsi, ctor_seq,
							GSI_SAME_STMT);
		  ctor_seq = NULL;
		}
	    }
	}
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
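  /* E.g., for GROUP_SIZE == 2 and NUNITS == 4 the loop above built a
     single {s1, s2, s1, s2} vector; if VF is twice the SLP unrolling
     factor, NUMBER_OF_VECTORS == 2, so one more vector (the same one,
     or a neutral vector for reductions) is pushed below.  */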
  while (number_of_vectors > vec_oprnds->length ())
    {
      tree neutral_vec = NULL;

      if (neutral_op)
	{
	  if (!neutral_vec)
	    neutral_vec = build_vector_from_val (vector_type, neutral_op);

	  vec_oprnds->quick_push (neutral_vec);
	}
      else
	{
	  for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
	    vec_oprnds->quick_push (vop);
	}
    }
}

/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
{
  tree vec_oprnd;
  gimple vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());

  FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      vec_oprnds->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the corresponding child of SLP_NODE, and we call
   vect_get_slp_vect_defs () to retrieve them.  */

void
vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
		   vec<vec<tree> > *vec_oprnds, int reduc_index)
{
  gimple first_stmt;
  int number_of_vects = 0, i;
  unsigned int child_index = 0;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
  slp_tree child = NULL;
  vec<tree> vec_defs;
  tree oprnd;
  bool vectorized_defs;

  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
  FOR_EACH_VEC_ELT (ops, i, oprnd)
    {
      /* For each operand we check if it has vectorized definitions in a child
	 node or we need to create them (for invariants and constants).  We
	 check if the LHS of the first stmt of the next child matches OPRND.
	 If it does, we found the correct child.  Otherwise, we call
	 vect_get_constant_vectors (), and not advance CHILD_INDEX in order
	 to check this child node for the next operand.  */
      vectorized_defs = false;
      if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
	{
	  child = (slp_tree) SLP_TREE_CHILDREN (slp_node)[child_index];

	  /* We have to check both pattern and original def, if available.  */
	  gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
	  gimple related = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));

	  if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
	      || (related
		  && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
	    {
	      /* The number of vector defs is determined by the number of
		 vector statements in the node from which we get those
		 statements.  */
	      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
	      vectorized_defs = true;
	      child_index++;
	    }
	}

      if (!vectorized_defs)
	{
	  if (i == 0)
	    {
	      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	      /* Number of vector stmts was calculated according to LHS in
		 vect_schedule_slp_instance (), fix it by replacing LHS with
		 RHS, if necessary.  See vect_get_smallest_scalar_type () for
		 details.  */
	      vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
					     &rhs_size_unit);
	      if (rhs_size_unit != lhs_size_unit)
		{
		  number_of_vects *= rhs_size_unit;
		  number_of_vects /= lhs_size_unit;
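		  /* E.g., if the group stores 2-byte shorts computed
		     from 4-byte int operands, NUMBER_OF_VECTS doubles
		     (times 4, divided by 2) to cover the wider RHS.  */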
		}
	    }
	}

      /* Allocate memory for vectorized defs.  */
      vec_defs = vNULL;
      vec_defs.create (number_of_vects);

      /* For reduction defs we call vect_get_constant_vectors (), since we are
	 looking for initial loop invariant values.  */
      if (vectorized_defs && reduc_index == -1)
	/* The defs are already vectorized.  */
	vect_get_slp_vect_defs (child, &vec_defs);
      else
	/* Build vectors from scalar defs.  */
	vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
				   number_of_vects, reduc_index);

      vec_oprnds->quick_push (vec_defs);

      /* For reductions, we only need initial values.  */
      if (reduc_index != -1)
	return;
    }
}

/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
   building a vector of type MASK_TYPE from it) and two input vectors placed in
   DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
   shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
   the created stmts must be inserted.  */

static inline void
vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
			   tree mask, int first_vec_indx, int second_vec_indx,
			   gimple_stmt_iterator *gsi, slp_tree node,
			   tree vectype, vec<tree> dr_chain,
			   int ncopies, int vect_stmts_counter)
{
  tree perm_dest;
  gimple perm_stmt = NULL;
  stmt_vec_info next_stmt_info;
  int i, stride;
  tree first_vec, second_vec, data_ref;

  stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
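  /* E.g., with 6 vector stmts for NODE and NCOPIES == 2, STRIDE == 3:
     copy I is stored at slot 3 * I + VECT_STMTS_COUNTER, and its inputs
     are taken 3 elements further into DR_CHAIN than those of the
     previous copy.  */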

  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
  for (i = SLP_TREE_VEC_STMTS (node).length ();
       i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
    SLP_TREE_VEC_STMTS (node).quick_push (NULL);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
  for (i = 0; i < ncopies; i++)
    {
      first_vec = dr_chain[first_vec_indx];
      second_vec = dr_chain[second_vec_indx];

      /* Generate the permute statement.  */
      perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
						first_vec, second_vec, mask);
      data_ref = make_ssa_name (perm_dest, perm_stmt);
      gimple_set_lhs (perm_stmt, data_ref);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);

      /* Store the vector statement in NODE.  */
      SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;

      first_vec_indx += stride;
      second_vec_indx += stride;
    }

  /* Mark the scalar stmt as vectorized.  */
  next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
  STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
}

/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
   return in CURRENT_MASK_ELEMENT its equivalent in target specific
   representation.  Check that the mask is valid and return FALSE if not.
   Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
   the next vector, i.e., the current first vector is not needed.  */

static bool
vect_get_mask_element (gimple stmt, int first_mask_element, int m,
		       int mask_nunits, bool only_one_vec, int index,
		       unsigned char *mask, int *current_mask_element,
		       bool *need_next_vector, int *number_of_mask_fixes,
		       bool *mask_fixed, bool *needs_first_vector)
{
  int i;

  /* Convert to target specific representation.  */
  *current_mask_element = first_mask_element + m;
  /* Adjust the value in case it's a mask for second and third vectors.  */
  *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);
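  /* E.g., once the mask has been fixed up (*NUMBER_OF_MASK_FIXES == 2)
     and MASK_NUNITS == 4, an element value of 6 is adjusted to 2 here,
     matching the {6,9,9,9} -> {2,5,5,5} conversion described before
     vect_transform_slp_perm_load below.  */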

  if (*current_mask_element < mask_nunits)
    *needs_first_vector = true;

  /* We have only one input vector to permute but the mask accesses values in
     the next vector as well.  */
  if (only_one_vec && *current_mask_element >= mask_nunits)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "permutation requires at least two vectors ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  /* The mask requires the next vector.  */
  if (*current_mask_element >= mask_nunits * 2)
    {
      if (*needs_first_vector || *mask_fixed)
	{
	  /* We either need the first vector too or have already moved to the
	     next vector.  In both cases, this permutation needs three
	     vectors.  */
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "permutation requires at "
			       "least three vectors ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	    }

	  return false;
	}

      /* We move to the next vector, dropping the first one and working with
	 the second and the third - we need to adjust the values of the mask
	 accordingly.  */
      *current_mask_element -= mask_nunits * *number_of_mask_fixes;

      for (i = 0; i < index; i++)
	mask[i] -= mask_nunits * *number_of_mask_fixes;

      (*number_of_mask_fixes)++;
      *mask_fixed = true;
    }

  *need_next_vector = *mask_fixed;

  /* This was the last element of this mask.  Start a new one.  */
  if (index == mask_nunits - 1)
    {
      *number_of_mask_fixes = 1;
      *mask_fixed = false;
      *needs_first_vector = false;
    }

  return true;
}

/* Generate vector permute statements from a list of loads in DR_CHAIN.
   If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
   permute statements for SLP_NODE_INSTANCE.  */
bool
vect_transform_slp_perm_load (gimple stmt, vec<tree> dr_chain,
			      gimple_stmt_iterator *gsi, int vf,
			      slp_instance slp_node_instance, bool analyze_only)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_element_type = NULL_TREE, mask_type;
  int i, j, k, nunits, vec_index = 0, scalar_index;
  slp_tree node;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  gimple next_scalar_stmt;
  int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
  int first_mask_element;
  int index, unroll_factor, current_mask_element, ncopies;
  unsigned char *mask;
  bool only_one_vec = false, need_next_vector = false;
  int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
  int number_of_mask_fixes = 1;
  bool mask_fixed = false;
  bool needs_first_vector = false;
  enum machine_mode mode;

  mode = TYPE_MODE (vectype);

  if (!can_vec_perm_p (mode, false, NULL))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vect permute for ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}
      return false;
    }

  /* The generic VEC_PERM_EXPR code always uses an integral type of the
     same size as the vector element being permuted.  */
  mask_element_type = lang_hooks.types.type_for_mode
		(int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  mask = XALLOCAVEC (unsigned char, nunits);
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
     unrolling factor.  */
  orig_vec_stmts_num = group_size *
		SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
  if (orig_vec_stmts_num == 1)
    only_one_vec = true;

  /* Number of copies is determined by the final vectorization factor
     relatively to SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
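  /* E.g., for the GROUP_SIZE == 3, NUNITS == 4 example below, the
     instance unrolling factor is 4, so ORIG_VEC_STMTS_NUM ==
     3 * 4 / 4 == 3 and more than one input vector is involved; with
     VF == 8, each mask is applied NCOPIES == 2 times.  */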

  /* Generate permutation masks for every NODE. Number of masks for each NODE
     is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4. I.e., we have a
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     ...

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
     The last mask is illegal since we assume two operands for permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_node_instance), i, node)
    {
      scalar_index = 0;
      index = 0;
      vect_stmts_counter = 0;
      vec_index = 0;
      first_vec_index = vec_index++;
      if (only_one_vec)
	second_vec_index = first_vec_index;
      else
	second_vec_index = vec_index++;

      for (j = 0; j < unroll_factor; j++)
	{
	  for (k = 0; k < group_size; k++)
	    {
	      first_mask_element = i + j * group_size;
	      if (!vect_get_mask_element (stmt, first_mask_element, 0,
					  nunits, only_one_vec, index,
					  mask, &current_mask_element,
					  &need_next_vector,
					  &number_of_mask_fixes, &mask_fixed,
					  &needs_first_vector))
		return false;
	      mask[index++] = current_mask_element;

	      if (index == nunits)
		{
		  tree mask_vec, *mask_elts;
		  int l;

		  if (!can_vec_perm_p (mode, false, mask))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					   vect_location,
					   "unsupported vect permute { ");
			  for (i = 0; i < nunits; ++i)
			    dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
					 mask[i]);
			  dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
			}
		      return false;
		    }

		  mask_elts = XALLOCAVEC (tree, nunits);
		  for (l = 0; l < nunits; ++l)
		    mask_elts[l] = build_int_cst (mask_element_type, mask[l]);
		  mask_vec = build_vector (mask_type, mask_elts);
		  index = 0;

		  if (!analyze_only)
		    {
		      if (need_next_vector)
			{
			  first_vec_index = second_vec_index;
			  second_vec_index = vec_index;
			}

		      next_scalar_stmt
			= SLP_TREE_SCALAR_STMTS (node)[scalar_index++];

		      vect_create_mask_and_perm (stmt, next_scalar_stmt,
			       mask_vec, first_vec_index, second_vec_index,
			       gsi, node, vectype, dr_chain,
			       ncopies, vect_stmts_counter++);
		    }
		}
	    }
	}
    }

  return true;
}

/* Vectorize SLP instance tree in postorder.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
			    unsigned int vectorization_factor)
{
  gimple stmt;
  bool grouped_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i;
  slp_tree loads_node;
  slp_void_p child;

  if (!node)
    return false;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_schedule_slp_instance ((slp_tree) child, instance,
				vectorization_factor);

  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  Number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
     size.  */
  vec_stmts_size = (vectorization_factor * group_size) / nunits;
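  /* E.g., with GROUP_SIZE == 2, VF == 4 and four-element vectors
     (NUNITS == 4), (4 * 2) / 4 == 2 vector stmts are created for every
     node of the tree.  */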

  /* In case of load permutation we have to allocate vectorized statements for
     all the nodes that participate in that permutation.  */
  if (SLP_INSTANCE_LOAD_PERMUTATION (instance).exists ())
    {
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, loads_node)
	{
	  if (!SLP_TREE_VEC_STMTS (loads_node).exists ())
	    {
	      SLP_TREE_VEC_STMTS (loads_node).create (vec_stmts_size);
	      SLP_TREE_NUMBER_OF_VEC_STMTS (loads_node) = vec_stmts_size;
	    }
	}
    }

  if (!SLP_TREE_VEC_STMTS (node).exists ())
    {
      SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "------>vectorizing SLP node starting from: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* Loads should be inserted before the first load.  */
  if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
      && STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
      && SLP_INSTANCE_LOAD_PERMUTATION (instance).exists ())
    si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
  else if (is_pattern_stmt_p (stmt_info))
    si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    si = gsi_for_stmt (stmt);

  /* Stores should be inserted just before the last store.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
    {
      gimple last_store = vect_find_last_store_in_slp_instance (instance);
      if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
	last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
      si = gsi_for_stmt (last_store);
    }

  /* Mark the first element of the reduction chain as reduction to properly
     transform the node.  In the analysis phase only the last element of the
     chain is marked as reduction.  */
  if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
    {
      STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
    }

  is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
  return is_store;
}

/* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
   For loop vectorization this is done in vectorizable_call, but for SLP
   it needs to be deferred until end of vect_schedule_slp, because multiple
   SLP instances may refer to the same scalar stmt.  */
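/* E.g., a pure-SLP call 'x_1 = powf (a_2, b_3);' whose uses were all
   vectorized is rewritten below as 'x_1 = 0.0f;' and then becomes
   trivially dead.  (Illustrative statement only.)  */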

static void
vect_remove_slp_scalar_calls (slp_tree node)
{
  gimple stmt, new_stmt;
  gimple_stmt_iterator gsi;
  int i;
  slp_void_p child;
  tree lhs;
  stmt_vec_info stmt_info;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_remove_slp_scalar_calls ((slp_tree) child);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
	continue;
      stmt_info = vinfo_for_stmt (stmt);
      if (stmt_info == NULL
	  || is_pattern_stmt_p (stmt_info)
	  || !PURE_SLP_STMT (stmt_info))
	continue;
      lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi = gsi_for_stmt (stmt);
      gsi_replace (&gsi, new_stmt, false);
      SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
    }
}

/* Generate vector code for all SLP instances in the loop/basic block.  */

bool
vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  slp_tree loads_node;
  unsigned int i, j, vf;
  bool is_store = false;

  if (loop_vinfo)
    {
      slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    {
      slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
      vf = 1;
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
					     instance, vf);

      /* Clear STMT_VINFO_VEC_STMT of all loads.  With shared loads
	 between SLP instances we fail to properly initialize the
	 vectorized SLP stmts and confuse different load permutations.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, loads_node)
	STMT_VINFO_VEC_STMT
	  (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (loads_node)[0])) = NULL;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vectorizing stmts using SLP.\n");
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple store;
      unsigned int j;
      gimple_stmt_iterator gsi;

      /* Remove scalar call stmts.  Do not do this for basic-block
	 vectorization as not all uses may be vectorized.
	 ???  Why should this be necessary?  DCE should be able to
	 remove the stmts itself.
	 ???  For BB vectorization we can as well remove scalar
	 stmts starting from the SLP tree root if they have no
	 uses.  */
      if (loop_vinfo)
	vect_remove_slp_scalar_calls (root);

      for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
		  && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
	{
	  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
	    break;

	  if (is_pattern_stmt_p (vinfo_for_stmt (store)))
	    store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
	  /* Free the attached stmt_vec_info and remove the stmt.  */
	  gsi = gsi_for_stmt (store);
	  unlink_stmt_vdef (store);
	  gsi_remove (&gsi, true);
	  release_defs (store);
	  free_stmt_vec_info (store);
	}
    }

  return is_store;
}

/* Vectorize the basic block.  */

void
vect_slp_transform_bb (basic_block bb)
{
  bb_vec_info bb_vinfo = vec_info_for_bb (bb);
  gimple_stmt_iterator si;

  gcc_assert (bb_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "------>SLPing statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);

      /* Schedule all the SLP instances when the first SLP stmt is reached.  */
      if (STMT_SLP_TYPE (stmt_info))
	{
	  vect_schedule_slp (NULL, bb_vinfo);
	  break;
	}
    }

  if (dump_enabled_p ())
    dump_printf (MSG_OPTIMIZED_LOCATIONS, "BASIC BLOCK VECTORIZED\n");

  destroy_bb_vec_info (bb_vinfo);
}