/* SLP - Basic Block Vectorization
   Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "cfgloop.h"
#include "expr.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "optabs.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/* Extract the location of the basic block in the source code.
   Return the basic block location if succeed and NULL if not.  */

LOC
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOC;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}

/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree ((slp_tree) child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();

  free (node);
}

/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOAD_PERMUTATION (instance).release ();
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}

/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;
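
  /* Note: for a COND_EXPR assignment gimple_num_ops is 4 (lhs, condition,
     then-value, else-value), so NOPS first becomes 3; the extra increment
     accounts for the condition later being split into its two comparison
     operands, giving four operand positions in total.  */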

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);

  return node;
}

/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_def_type = NULL_TREE;
      oprnd_info->first_const_oprnd = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}

/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}

/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  */

static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             slp_tree slp_node, gimple stmt,
                             int ncopies_for_cost, bool first,
                             vec<slp_oprnd_info> *oprnds_info,
                             stmt_vector_for_cost *prologue_cost_vec,
                             stmt_vector_for_cost *body_cost_vec)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def, def_op0 = NULL_TREE;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  enum vect_def_type dt_op0 = vect_uninitialized_def;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree lhs = gimple_get_lhs (stmt);
  struct loop *loop = NULL;
  enum tree_code rhs_code;
  bool different_types = false;
  bool pattern = false;
  slp_oprnd_info oprnd_info, oprnd0_info, oprnd1_info;
  int op_idx = 1;
  tree compare_rhs = NULL_TREE;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        number_of_oprnds++;
    }
  else
    return false;

  for (i = 0; i < number_of_oprnds; i++)
    {
      if (compare_rhs)
        {
          oprnd = compare_rhs;
          compare_rhs = NULL_TREE;
        }
      else
        oprnd = gimple_op (stmt, op_idx++);

      oprnd_info = (*oprnds_info)[i];

      if (COMPARISON_CLASS_P (oprnd))
        {
          compare_rhs = TREE_OPERAND (oprnd, 1);
          oprnd = TREE_OPERAND (oprnd, 0);
        }

      if (!vect_is_simple_use (oprnd, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt)
          || (!def_stmt && dt != vect_constant_def))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't find def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
            }

          return false;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
              || (!loop && gimple_bb (def_stmt) == BB_VINFO_BB (bb_vinfo)
                  && gimple_code (def_stmt) != GIMPLE_PHI))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
        {
          pattern = true;
          if (!first && !oprnd_info->first_pattern)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                }

              return false;
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.");
              return false;
            }

          switch (gimple_code (def_stmt))
            {
              case GIMPLE_PHI:
                def = gimple_phi_result (def_stmt);
                break;

              case GIMPLE_ASSIGN:
                def = gimple_assign_lhs (def_stmt);
                break;

              default:
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported defining stmt: ");
                return false;
            }
        }

      if (first)
        {
          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          if (def)
            {
              oprnd_info->first_def_type = TREE_TYPE (def);
              oprnd_info->first_const_oprnd = NULL_TREE;
            }
          else
            {
              oprnd_info->first_def_type = NULL_TREE;
              oprnd_info->first_const_oprnd = oprnd;
            }

          if (i == 0)
            {
              def_op0 = def;
              dt_op0 = dt;
              /* Analyze costs (for the first stmt of the group only).  */
              if (REFERENCE_CLASS_P (lhs))
                /* Store.  */
                vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                                       dt, slp_node, prologue_cost_vec,
                                       body_cost_vec);
              else
                {
                  enum vect_def_type dts[2];
                  dts[0] = dt;
                  dts[1] = vect_uninitialized_def;
                  /* Not memory operation (we don't call this function for
                     loads).  */
                  vect_model_simple_cost (stmt_info, ncopies_for_cost, dts,
                                          prologue_cost_vec, body_cost_vec);
                }
            }
        }
      else
        {
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def))
               || (oprnd_info->first_def_type != NULL_TREE
                   && def
                   && !types_compatible_p (oprnd_info->first_def_type,
                                           TREE_TYPE (def))))
              || (!def
                  && !types_compatible_p (TREE_TYPE (oprnd_info->first_const_oprnd),
                                          TREE_TYPE (oprnd)))
              || different_types)
            {
              if (number_of_oprnds != 2)
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different types ");

                  return false;
                }

              /* Try to swap operands in case of binary operation.  */
              if (i == 0)
                different_types = true;
              else
                {
                  oprnd0_info = (*oprnds_info)[0];
                  if (is_gimple_assign (stmt)
                      && (rhs_code = gimple_assign_rhs_code (stmt))
                      && TREE_CODE_CLASS (rhs_code) == tcc_binary
                      && commutative_tree_code (rhs_code)
                      && oprnd0_info->first_dt == dt
                      && oprnd_info->first_dt == dt_op0
                      && def_op0 && def
                      && !(oprnd0_info->first_def_type
                           && !types_compatible_p (oprnd0_info->first_def_type,
                                                   TREE_TYPE (def)))
                      && !(oprnd_info->first_def_type
                           && !types_compatible_p (oprnd_info->first_def_type,
                                                   TREE_TYPE (def_op0))))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "Swapping operands of ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }

                      swap_tree_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                          gimple_assign_rhs2_ptr (stmt));
                    }
                  else
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "Build SLP failed: different types ");

                      return false;
                    }
                }
            }
        }

      /* Check the types of the definitions.  */
      switch (dt)
        {
        case vect_constant_def:
        case vect_external_def:
        case vect_reduction_def:
          break;

        case vect_internal_def:
          if (different_types)
            {
              oprnd0_info = (*oprnds_info)[0];
              oprnd1_info = (*oprnds_info)[1];
              if (i == 0)
                oprnd1_info->def_stmts.quick_push (def_stmt);
              else
                oprnd0_info->def_stmts.quick_push (def_stmt);
            }
          else
            oprnd_info->def_stmts.quick_push (def_stmt);

          break;

        default:
          /* FORNOW: Not supported.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: illegal type of def ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
            }

          return false;
        }
    }

  return true;
}
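
/* For example, for the group
       a0 = b0 + c0;
       a1 = b1 + c1;
   the checks above accept the node because operand 0 ({b0, b1}) and
   operand 1 ({c0, c1}) each resolve to a single def type and to compatible
   types, so one vector operand can be built per operand position.  */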

/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise, return
   TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size, int *outside_cost,
                     int ncopies_for_cost, unsigned int *max_nunits,
                     vec<int> *load_permutation,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor, bool *loads_permuted,
                     stmt_vector_for_cost *prologue_cost_vec,
                     stmt_vector_for_cost *body_cost_vec)
{
  unsigned int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (*node);
  gimple stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  tree lhs;
  bool stop_recursion = false, need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  unsigned int ncopies;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  struct data_reference *first_dr;
  HOST_WIDE_INT dummy;
  bool permutation = false;
  unsigned int load_place;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
  vec<slp_oprnd_info> oprnds_info;
  unsigned int nops;
  slp_oprnd_info oprnd_info;
  tree cond;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return false;

  oprnds_info = vect_create_oprnd_info (nops, group_size);

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      if (is_gimple_assign (stmt)
          && gimple_assign_rhs_code (stmt) == COND_EXPR
          && (cond = gimple_assign_rhs1 (stmt))
          && !COMPARISON_CLASS_P (cond))
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: condition is not "
                               "comparison ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
            }

          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        {
          *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
          if (bb_vinfo)
            vectorization_factor = *max_nunits;
        }

      ncopies = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);

      if (is_gimple_call (stmt))
        {
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (stmt)
              || gimple_call_tail_p (stmt)
              || gimple_call_noreturn_p (stmt)
              || !gimple_call_nothrow_p (stmt)
              || gimple_call_chain (stmt))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }
        }
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "Build SLP failed: no optab.");
                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }
                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.");
                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }
                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
          else if (rhs_code == WIDEN_LSHIFT_EXPR)
            {
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          if (rhs_code == CALL_EXPR)
            {
              gimple first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }
            }
        }

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node,
                                                stmt, ncopies_for_cost,
                                                (i == 0), &oprnds_info,
                                                prologue_cost_vec,
                                                body_cost_vec))
                {
                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }
            }
          else
            {
              /* Load.  */
              /* FORNOW: Check that there is no gap between the loads.  */
              if ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
                   && GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt
                      && GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: grouped "
                                       "loads have gaps ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              if (loop_vinfo
                  && GROUP_SIZE (vinfo_for_stmt (stmt)) > ncopies * group_size)
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: the number "
                                       "of interleaved loads is greater than "
                                       "the SLP group size ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }

              old_first_load = first_load;
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  The only exception is complex
                     numbers.  */
                  if (prev_first_load != first_load
                      && rhs_code != REALPART_EXPR
                      && rhs_code != IMAGPART_EXPR)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                        }

                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }
                }
              else
                prev_first_load = first_load;

              /* In some cases a group of loads is just the same load
                 repeated N times.  Only analyze its cost once.  */
              if (first_load == stmt && old_first_load != first_load)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: unsupported "
                                           "unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                        }

                      vect_free_oprnd_info (oprnds_info);
                      return false;
                    }

                  /* Analyze costs (for the first stmt in the group).  */
                  vect_model_load_cost (vinfo_for_stmt (stmt),
                                        ncopies_for_cost, false, *node,
                                        prologue_cost_vec, body_cost_vec);
                }

              /* Store the place of this load in the interleaving chain.  In
                 case that permutation is needed we later decide if a specific
                 permutation is supported.  */
              load_place = vect_get_place_in_interleaving_chain (stmt,
                                                                 first_load);
              if (load_place != i)
                permutation = true;

              load_permutation->safe_push (load_place);

              /* We stop the tree when we reach a group of loads.  */
              stop_recursion = true;
              continue;
            }
        } /* Grouped access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not grouped load.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              /* FORNOW: Not grouped loads are not supported.  */
              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && rhs_code != COND_EXPR
              && rhs_code != CALL_EXPR)
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_oprnd_info (oprnds_info);
              return false;
            }

          if (rhs_code == COND_EXPR)
            {
              tree cond_expr = gimple_assign_rhs1 (stmt);

              if (i == 0)
                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                    }

                  vect_free_oprnd_info (oprnds_info);
                  return false;
                }
            }

          /* Find the def-stmts.  */
          if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node, stmt,
                                            ncopies_for_cost, (i == 0),
                                            &oprnds_info, prologue_cost_vec,
                                            body_cost_vec))
            {
              vect_free_oprnd_info (oprnds_info);
              return false;
            }
        }
    }

  /* Grouped loads were reached - stop the recursion.  */
  if (stop_recursion)
    {
      loads->safe_push (*node);
      if (permutation)
        {
          gimple first_stmt = stmts[0];
          *loads_permuted = true;
          (void) record_stmt_cost (body_cost_vec, group_size, vec_perm,
                                   vinfo_for_stmt (first_stmt), 0, vect_body);
        }
      else
        {
          /* We don't check here complex numbers chains, so we set
             LOADS_PERMUTED for further check in
             vect_supported_load_permutation_p.  */
          if (rhs_code == REALPART_EXPR || rhs_code == IMAGPART_EXPR)
            *loads_permuted = true;
        }

      vect_free_oprnd_info (oprnds_info);
      return true;
    }

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      slp_tree child;

      if (oprnd_info->first_dt != vect_internal_def)
        continue;

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
      if (!child
          || !vect_build_slp_tree (loop_vinfo, bb_vinfo, &child, group_size,
                                   outside_cost, ncopies_for_cost,
                                   max_nunits, load_permutation, loads,
                                   vectorization_factor, loads_permuted,
                                   prologue_cost_vec, body_cost_vec))
        {
          if (child)
            oprnd_info->def_stmts = vec<gimple>();
          vect_free_slp_tree (child);
          vect_free_oprnd_info (oprnds_info);
          return false;
        }

      oprnd_info->def_stmts.create (0);
      SLP_TREE_CHILDREN (*node).quick_push (child);
    }

  vect_free_oprnd_info (oprnds_info);
  return true;
}
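
/* For instance, for the two stores
       a[0] = x0 + y0;
       a[1] = x1 + y1;
   vect_build_slp_tree creates one child node per operand position,
   {x0, x1} and {y0, y1}, and recurses into each; the recursion stops at
   groups of loads, and constant/external defs become vector operands
   directly instead of child nodes.  */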

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_void_p child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, (slp_tree) child);
}

/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts ((slp_tree) child, mark, j);
}

/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant ((slp_tree) child);
}

/* Check if the permutation required by the SLP INSTANCE is supported.
   Reorganize the SLP nodes stored in SLP_INSTANCE_LOADS if needed.  */

static bool
vect_supported_slp_permutation_p (slp_instance instance)
{
  slp_tree node = SLP_INSTANCE_LOADS (instance)[0];
  gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  gimple first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
  vec<slp_tree> sorted_loads = vec<slp_tree>();
  int index;
  slp_tree *tmp_loads = NULL;
  int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j;
  slp_tree load;

  /* FORNOW: The only supported loads permutation is loads from the same
     location in all the loads in the node, when the data-refs in
     nodes of LOADS constitute an interleaving chain.
     Sort the nodes according to the order of accesses in the chain.  */
  tmp_loads = (slp_tree *) xmalloc (sizeof (slp_tree) * group_size);
  for (i = 0, j = 0;
       SLP_INSTANCE_LOAD_PERMUTATION (instance).iterate (i, &index)
       && SLP_INSTANCE_LOADS (instance).iterate (j, &load);
       i += group_size, j++)
    {
      gimple scalar_stmt = SLP_TREE_SCALAR_STMTS (load)[0];
      /* Check that the loads are all in the same interleaving chain.  */
      if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (scalar_stmt)) != first_load)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data "
                               "permutation ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                scalar_stmt, 0);
            }

          free (tmp_loads);
          return false;
        }

      tmp_loads[index] = load;
    }

  sorted_loads.create (group_size);
  for (i = 0; i < group_size; i++)
    sorted_loads.safe_push (tmp_loads[i]);

  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_LOADS (instance) = sorted_loads;
  free (tmp_loads);
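
  /* For example, with two load nodes and load permutation {1, 0}, the node
     reading from position 0 of the interleaving chain is placed first in
     SLP_INSTANCE_LOADS, regardless of the order in which the nodes were
     discovered.  */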

  if (!vect_transform_slp_perm_load (stmt, vec<tree>(), NULL,
                                     SLP_INSTANCE_UNROLLING_FACTOR (instance),
                                     instance, true))
    return false;

  return true;
}

/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<int> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int index, i;
  slp_void_p child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts ((slp_tree) child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);

  for (i = 0; i < group_size; i++)
    tmp_stmts.safe_push (NULL);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      index = permutation[i];
      tmp_stmts[index] = stmt;
    }
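
  /* The stmt at index I moves to index PERMUTATION[I]: e.g., for
     GROUP_SIZE == 4 and PERMUTATION == {2, 0, 3, 1}, {s0, s1, s2, s3}
     becomes {s1, s3, s0, s2}.  */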

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}

/* Check if the required load permutation is supported.
   LOAD_PERMUTATION contains a list of indices of the loads.
   In SLP this permutation is relative to the order of grouped stores that are
   the base of the SLP instance.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
                                   vec<int> load_permutation)
{
  int i = 0, j, prev = -1, next, k, number_of_groups;
  bool supported, bad_permutation = false;
  sbitmap load_index;
  slp_tree node, other_complex_node;
  gimple stmt, first = NULL, other_node_first, load, next_load, first_load;
  unsigned complex_numbers = 0;
  struct data_reference *dr;
  bb_vec_info bb_vinfo;

  /* FORNOW: permutations are only supported in SLP.  */
  if (!slp_instn)
    return false;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (load_permutation, i, next)
        dump_printf (MSG_NOTE, "%d ", next);
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    {
      if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)
        return false;

      stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      if (is_gimple_assign (stmt)
          && (gimple_assign_rhs_code (stmt) == REALPART_EXPR
              || gimple_assign_rhs_code (stmt) == IMAGPART_EXPR))
        complex_numbers++;
    }

  /* Complex operands can be swapped as following:
      real_c = real_b + real_a;
      imag_c = imag_a + imag_b;
     i.e., we have {real_b, imag_a} and {real_a, imag_b} instead of
     {real_a, imag_a} and {real_b, imag_b}.  We check here that if interleaving
     chains are mixed, they match the above pattern.  */
  if (complex_numbers)
    {
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, stmt)
            {
              if (j == 0)
                first = stmt;
              else
                {
                  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != first)
                    {
                      if (complex_numbers != 2)
                        return false;

                      if (i == 0)
                        k = 1;
                      else
                        k = 0;

                      other_complex_node = SLP_INSTANCE_LOADS (slp_instn)[k];
                      other_node_first
                        = SLP_TREE_SCALAR_STMTS (other_complex_node)[0];

                      if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
                          != other_node_first)
                        return false;
                    }
                }
            }
        }
    }

  /* We checked that this case ok, so there is no need to proceed with
     permutation tests.  */
  if (complex_numbers == 2
      && SLP_INSTANCE_LOADS (slp_instn).length () == 2)
    {
      SLP_INSTANCE_LOADS (slp_instn).release ();
      SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
      return true;
    }

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  /* LOAD_PERMUTATION is a list of indices of all the loads of the SLP
     instance, not all the loads belong to the same node or interleaving
     group.  Hence, we need to divide them into groups according to
     GROUP_SIZE.  */
  number_of_groups = load_permutation.length () / group_size;

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      int first_group_load_index;

      /* Compare all the permutation sequences to the first one.  */
      for (i = 1; i < number_of_groups; i++)
        {
          k = 0;
          for (j = i * group_size; j < i * group_size + group_size; j++)
            {
              next = load_permutation[j];
              first_group_load_index = load_permutation[k];

              if (next != first_group_load_index)
                {
                  bad_permutation = true;
                  break;
                }

              k++;
            }

          if (bad_permutation)
            break;
        }

      if (!bad_permutation)
        {
          /* Check that the loads in the first sequence are different and there
             are no gaps between them.  */
          load_index = sbitmap_alloc (group_size);
          bitmap_clear (load_index);
          for (k = 0; k < group_size; k++)
            {
              first_group_load_index = load_permutation[k];
              if (bitmap_bit_p (load_index, first_group_load_index))
                {
                  bad_permutation = true;
                  break;
                }

              bitmap_set_bit (load_index, first_group_load_index);
            }

          if (!bad_permutation)
            for (k = 0; k < group_size; k++)
              if (!bitmap_bit_p (load_index, k))
                {
                  bad_permutation = true;
                  break;
                }

          sbitmap_free (load_index);
        }

      if (!bad_permutation)
        {
          /* This permutation is valid for reduction.  Since the order of the
             statements in the nodes is not important unless they are memory
             accesses, we can rearrange the statements in all the nodes
             according to the order of the loads.  */
          vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                    load_permutation);
          SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
          return true;
        }
    }

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment compications.  */
  bb_vinfo = STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt));
  bad_permutation = false;
  /* Check that for every node in the instance the loads form a subchain.  */
  if (bb_vinfo)
    {
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        {
          next_load = NULL;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
            {
              if (j == 0)
                first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (load));
              else if (first_load
                       != GROUP_FIRST_ELEMENT (vinfo_for_stmt (load)))
                {
                  bad_permutation = true;
                  break;
                }

              if (j != 0 && next_load != load)
                {
                  bad_permutation = true;
                  break;
                }

              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            }

          if (bad_permutation)
            break;
        }

      /* Check that the alignment of the first load in every subchain, i.e.,
         the first statement in every load node, is supported.  */
      if (!bad_permutation)
        {
          FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
            {
              first_load = SLP_TREE_SCALAR_STMTS (node)[0];
              if (first_load
                  != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
                {
                  dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
                  if (vect_supportable_dr_alignment (dr, false)
                      == dr_unaligned_unsupported)
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "unsupported unaligned load ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            first_load, 0);
                        }
                      bad_permutation = true;
                      break;
                    }
                }
            }

          if (!bad_permutation)
            {
              SLP_INSTANCE_LOAD_PERMUTATION (slp_instn).release ();
              return true;
            }
        }
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
  if (load_permutation.length ()
      != (unsigned int) (group_size * group_size))
    return false;
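
  /* E.g., for GROUP_SIZE == 2 this generic check accepts only permutations
     such as {0, 0, 1, 1}: each of the GROUP_SIZE sequences must repeat a
     single load index, and together the sequences must cover every index
     0..GROUP_SIZE-1 exactly once.  */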

  supported = true;
  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  for (j = 0; j < group_size; j++)
    {
      for (i = j * group_size, k = 0;
           load_permutation.iterate (i, &next) && k < group_size;
           i++, k++)
        {
          if (i != j * group_size && next != prev)
            {
              supported = false;
              break;
            }

          prev = next;
        }

      if (bitmap_bit_p (load_index, prev))
        {
          supported = false;
          break;
        }

      bitmap_set_bit (load_index, prev);
    }

  for (j = 0; j < group_size; j++)
    if (!bitmap_bit_p (load_index, j))
      {
        sbitmap_free (load_index);
        return false;
      }

  sbitmap_free (load_index);

  if (supported && i == group_size * group_size
      && vect_supported_slp_permutation_p (slp_instn))
    return true;

  return false;
}

/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load_node)
    FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}

/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_store_in_slp_instance (slp_instance instance)
{
  int i;
  slp_tree node;
  gimple last_store = NULL, store;

  node = SLP_INSTANCE_TREE (instance);
  for (i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &store); i++)
    last_store = get_later_stmt (store, last_store);

  return last_store;
}

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt)
{
  slp_instance new_instance;
  slp_tree node;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int outside_cost = 0, ncopies_for_cost, i;
  unsigned int max_nunits = 0;
  vec<int> load_permutation;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  bool loads_permuted = false;
  vec<gimple> scalar_stmts;
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  stmt_info_for_cost *si;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      if (dr)
        {
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);
        }
      else
        {
          gcc_assert (loop_vinfo);
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
        }

      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = LOOP_VINFO_REDUCTIONS (loop_vinfo).length ();
    }

  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  next = stmt;
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
          else
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      vec<gimple> reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);
    }

  node = vect_create_new_slp_node (scalar_stmts);

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise.  */
  ncopies_for_cost = unrolling_factor * group_size / nunits;
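
  /* For example, with NUNITS == 4 and GROUP_SIZE == 6 the unrolling factor
     is lcm (4, 6) / 6 == 2, so NCOPIES_FOR_COST == 2 * 6 / 4 == 3 vector
     stmts.  */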

  load_permutation.create (group_size * group_size);
  loads.create (group_size);
  prologue_cost_vec.create (10);
  body_cost_vec.create (10);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &outside_cost, ncopies_for_cost,
                           &max_nunits, &load_permutation, &loads,
                           vectorization_factor, &loads_permuted,
                           &prologue_cost_vec, &body_cost_vec))
    {
      void *data = (loop_vinfo ? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
                    : BB_VINFO_TARGET_COST_DATA (bb_vinfo));

      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && !loop_vinfo)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP");
          vect_free_slp_tree (node);
          body_cost_vec.release ();
          prologue_cost_vec.release ();
          load_permutation.release ();
          loads.release ();
          return false;
        }

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_BODY_COST_VEC (new_instance) = body_cost_vec;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;
      SLP_INSTANCE_LOAD_PERMUTATION (new_instance) = load_permutation;

      if (loads_permuted)
        {
          if (!vect_supported_load_permutation_p (new_instance, group_size,
                                                  load_permutation))
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported load "
                                   "permutation ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                }

              vect_free_slp_instance (new_instance);
              prologue_cost_vec.release ();
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }
      else
        SLP_INSTANCE_LOAD_PERMUTATION (new_instance).release ();

      /* Record the prologue costs, which were delayed until we were
         sure that SLP was successful.  Unlike the body costs, we know
         the final values now regardless of the loop vectorization factor.  */
      FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
        {
          struct _stmt_vec_info *stmt_info
            = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
          (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                                si->misalign, vect_prologue);
        }

      prologue_cost_vec.release ();

      if (loop_vinfo)
        LOOP_VINFO_SLP_INSTANCES (loop_vinfo).safe_push (new_instance);
      else
        BB_VINFO_SLP_INSTANCES (bb_vinfo).safe_push (new_instance);

      if (dump_enabled_p ())
        vect_print_slp_tree (MSG_NOTE, node);

      return true;
    }
  else
    {
      body_cost_vec.release ();
      prologue_cost_vec.release ();
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  load_permutation.release ();
  loads.release ();

  return false;
}

/* Check if there are stmts in the loop can be vectorized using SLP.  Build SLP
   trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  vec<gimple> grouped_stores;
  vec<gimple> reductions = vec<gimple>();
  vec<gimple> reduc_chains = vec<gimple>();
  gimple first_element;
  bool ok = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===");

  if (loop_vinfo)
    {
      grouped_stores = LOOP_VINFO_GROUPED_STORES (loop_vinfo);
      reduc_chains = LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    grouped_stores = BB_VINFO_GROUPED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Failed to SLP the basic block.");

      return false;
    }

  if (loop_vinfo
      && LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).length () > 0)
    {
      /* Find SLP sequences starting from reduction chains.  */
      FOR_EACH_VEC_ELT (reduc_chains, i, first_element)
        if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, first_element))
          ok = true;
        else
          return false;

      /* Don't try to vectorize SLP reductions if reduction chain was
         detected.  */
      return ok;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && LOOP_VINFO_REDUCTIONS (loop_vinfo).length () > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo, reductions[0]))
    ok = true;

  return true;
}

/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ===");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
}

/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (node);
  gimple stmt = stmts[0];
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  slp_void_p child;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = NULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
  basic_block bb = NULL;

  if (!node)
    return;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);
  else
    bb = BB_VINFO_BB (bb_vinfo);
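
  /* A stmt becomes hybrid when its result is also used outside the SLP
     instance, e.g.
         x_1 = a[i] + b[i];    <-- in an SLP node
         sum_2 = sum_1 + x_1;  <-- only loop-vectorized
     x_1 must then be vectorized by both schemes.  */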

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
        && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
        if (gimple_bb (use_stmt)
            && ((loop && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
                || bb == gimple_bb (use_stmt))
            && (stmt_vinfo = vinfo_for_stmt (use_stmt))
            && !STMT_SLP_TYPE (stmt_vinfo)
            && (STMT_VINFO_RELEVANT (stmt_vinfo)
                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo)))
            && !(gimple_code (use_stmt) == GIMPLE_PHI
                 && STMT_VINFO_DEF_TYPE (stmt_vinfo)
                      == vect_reduction_def))
          vect_mark_slp_stmts (node, hybrid, i);

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_detect_hybrid_slp_stmts ((slp_tree) child);
}

/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ===");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}

/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}

/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  free_data_refs (BB_VINFO_DATAREFS (bb_vinfo));
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}

/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_void_p child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (bb_vinfo, (slp_tree) child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}

/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);
        }
      else
        i++;
    }

  if (!slp_instances.length ())
    return false;

  return true;
}

/* Check if vectorization of the basic block is profitable.  */

static bool
vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i, j;
  unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
  unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
  unsigned int stmt_cost;
  gimple stmt;
  gimple_stmt_iterator si;
  basic_block bb = BB_VINFO_BB (bb_vinfo);
  void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
  stmt_vec_info stmt_info = NULL;
  stmt_vector_for_cost body_cost_vec;
  stmt_info_for_cost *ci;

  /* Calculate vector costs.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);

      FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
        {
          stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
          (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
                                stmt_info, ci->misalign, vect_body);
        }
    }

  /* Calculate scalar cost.  */
  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      stmt_info = vinfo_for_stmt (stmt);

      if (!stmt_info || !STMT_VINFO_VECTORIZABLE (stmt_info)
          || !PURE_SLP_STMT (stmt_info))
        continue;

      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = vect_get_stmt_cost (scalar_load);
          else
            stmt_cost = vect_get_stmt_cost (scalar_store);
        }
      else
        stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;
    }

  /* Complete the target-specific cost calculation.  */
  finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of basic block cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n", vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n", vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar cost of basic block: %d", scalar_cost);
    }
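
  /* Note that the comparison below is not strict: on a tie the scalar
     version is kept.  */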

  /* Vectorization is profitable if its cost is less than the cost of scalar
     version.  */
  if (vec_outside_cost + vec_inside_cost >= scalar_cost)
    return false;

  return true;
}

/* Check if the basic block can be vectorized.  */

static bb_vec_info
vect_slp_analyze_bb_1 (basic_block bb)
{
  bb_vec_info bb_vinfo;
  vec<ddr_p> ddrs;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  int i;
  int min_vf = 2;
  int max_vf = MAX_VECTORIZATION_FACTOR;

  bb_vinfo = new_bb_vec_info (bb);
  if (!bb_vinfo)
    return NULL;

  if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data-ref in basic "
                         "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  ddrs = BB_VINFO_DDRS (bb_vinfo);
  if (!ddrs.length ())
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: not enough data-refs in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  vect_pattern_recog (NULL, bb_vinfo);

  if (!vect_analyze_data_ref_dependences (NULL, bb_vinfo, &max_vf)
      || min_vf > max_vf)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data dependence "
                         "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad data alignment in basic "
                         "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data access in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: failed to find SLP opportunities "
                         "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

  /* Mark all the statements that we want to vectorize as pure SLP and
     relevant.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
    }

  if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported alignment in basic "
                         "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_slp_analyze_operations (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad operation in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Cost model: check if the vectorization is worthwhile.  */
  if (flag_vect_cost_model
      && !vect_bb_vectorization_profitable_p (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization is not "
                         "profitable.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Basic block will be vectorized using SLP\n");

  return bb_vinfo;
}

bb_vec_info
vect_slp_analyze_bb (basic_block bb)
{
  bb_vec_info bb_vinfo;
  int insns = 0;
  gimple_stmt_iterator gsi;
  unsigned int vector_sizes;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt)
          && !gimple_nop_p (stmt)
          && gimple_code (stmt) != GIMPLE_LABEL)
        insns++;
    }

  if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: too many instructions in "
                         "basic block.\n");

      return NULL;
    }

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
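
  /* VECTOR_SIZES is a bitmask of the sizes the target supports (e.g.
     32 | 16 for a target with 256-bit and 128-bit vectors); the loop below
     retries the analysis with the largest remaining size each time.  */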

  while (1)
    {
      bb_vinfo = vect_slp_analyze_bb_1 (bb);
      if (bb_vinfo)
        return bb_vinfo;

      destroy_bb_vec_info (bb_vinfo);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "***** Re-trying analysis with "
                         "vector size %d\n", current_vector_size);
    }
}

/* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
   the number of created vector stmts depends on the unrolling factor).
   However, the actual number of vector stmts for every SLP node depends on
   VF which is set later in vect_analyze_operations ().  Hence, SLP costs
   should be updated.  In this function we assume that the inside costs
   calculated in vect_model_xxx_cost are linear in ncopies.  */

void
vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
{
  unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  stmt_vector_for_cost body_cost_vec;
  stmt_info_for_cost *si;
  void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_update_slp_costs_according_to_vf ===");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* We assume that costs are linear in ncopies.  */
      int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);
2296 /* Record the instance's instructions in the target cost model.
2297 This was delayed until here because the count of instructions
2298 isn't known beforehand. */
2299 body_cost_vec
= SLP_INSTANCE_BODY_COST_VEC (instance
);
2301 FOR_EACH_VEC_ELT (body_cost_vec
, j
, si
)
2302 (void) add_stmt_cost (data
, si
->count
* ncopies
, si
->kind
,
2303 vinfo_for_stmt (si
->stmt
), si
->misalign
,
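
/* For example (illustrative numbers): if the final VF is 8 and an instance
   was built with unrolling factor 2, NCOPIES is 8 / 2 == 4, so a cost entry
   recorded with count 3 during analysis is handed to the target hook as
   3 * 4 == 12 statements.  */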

/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
   scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

static void
vect_get_constant_vectors (tree op, slp_tree slp_node,
			   vec<tree> *vec_oprnds,
			   unsigned int op_num, unsigned int number_of_vectors,
			   int reduc_index)
{
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned nunits;
  tree vec_cst;
  tree *elts;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;
  enum tree_code code = gimple_expr_code (stmt);
  gimple def_stmt;
  struct loop *loop;
  gimple_seq ctor_seq = NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && reduc_index != -1)
    {
      op_num = reduc_index - 1;
      op = gimple_op (stmt, reduc_index);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
	 we need either neutral operands or the original operands.  See
	 get_initial_def_for_reduction() for details.  */
      switch (code)
	{
	  case WIDEN_SUM_EXPR:
	  case DOT_PROD_EXPR:
	  case PLUS_EXPR:
	  case MINUS_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	    if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
	      neutral_op = build_real (TREE_TYPE (op), dconst0);
	    else
	      neutral_op = build_int_cst (TREE_TYPE (op), 0);
	    break;

	  case MULT_EXPR:
	    if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
	      neutral_op = build_real (TREE_TYPE (op), dconst1);
	    else
	      neutral_op = build_int_cst (TREE_TYPE (op), 1);
	    break;

	  case BIT_AND_EXPR:
	    neutral_op = build_int_cst (TREE_TYPE (op), -1);
	    break;

	  case MAX_EXPR:
	  case MIN_EXPR:
	    def_stmt = SSA_NAME_DEF_STMT (op);
	    loop = (gimple_bb (stmt))->loop_father;
	    neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
						loop_preheader_edge (loop));
	    break;

	  default:
	    neutral_op = NULL;
	}
    }
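
  /* E.g., for a PLUS_EXPR reduction over doubles the neutral operand is 0.0,
     and for MULT_EXPR over ints it is 1: padding the extra unrolled copies
     with the neutral element leaves the reduction result unchanged.  MIN and
     MAX have no neutral element, so the initial value from before the loop
     is reused instead.  */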

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      is_store = true;
      op = gimple_assign_rhs1 (stmt);
    }
  else
    is_store = false;

  gcc_assert (op);

  if (CONSTANT_CLASS_P (op))
    constant_p = true;
  else
    constant_p = false;

  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  gcc_assert (vector_type);
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = least_common_multiple (nunits, group_size) / group_size;
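
  /* For instance, with NUNITS == 4 and GROUP_SIZE == 6 (sizes chosen only to
     illustrate the non-dividing case): least_common_multiple (4, 6) == 12,
     so NUMBER_OF_COPIES == 12 / 6 == 2, and the twelve operand slots fill
     three vectors: {s1,s2,s3,s4} {s5,s6,s1,s2} {s3,s4,s5,s6}.  */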

  number_of_places_left_in_vector = nunits;
  elts = XALLOCAVEC (tree, nunits);
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
	{
	  if (is_store)
	    op = gimple_assign_rhs1 (stmt);
	  else
	    {
	      switch (code)
		{
		  case COND_EXPR:
		    if (op_num == 0 || op_num == 1)
		      {
			tree cond = gimple_assign_rhs1 (stmt);
			op = TREE_OPERAND (cond, op_num);
		      }
		    else if (op_num == 2)
		      op = gimple_assign_rhs2 (stmt);
		    else
		      op = gimple_assign_rhs3 (stmt);
		    break;

		  case CALL_EXPR:
		    op = gimple_call_arg (stmt, op_num);
		    break;

		  case LSHIFT_EXPR:
		  case RSHIFT_EXPR:
		  case LROTATE_EXPR:
		  case RROTATE_EXPR:
		    op = gimple_op (stmt, op_num + 1);
		    /* Unlike the other binary operators, shifts/rotates have
		       the shift count being int, instead of the same type as
		       the lhs, so make sure the scalar is the right type if
		       we are dealing with vectors of
		       long long/long/short/char.  */
		    if (op_num == 1 && constant_p)
		      op = fold_convert (TREE_TYPE (vector_type), op);
		    break;

		  default:
		    op = gimple_op (stmt, op_num + 1);
		    break;
		}
	    }

	  if (reduc_index != -1)
	    {
	      loop = (gimple_bb (stmt))->loop_father;
	      def_stmt = SSA_NAME_DEF_STMT (op);

	      gcc_assert (loop);

	      /* Get the def before the loop.  In reduction chain we have only
		 one initial value.  */
	      if ((j != (number_of_copies - 1)
		   || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
		       && i != 0))
		  && neutral_op)
		op = neutral_op;
	      else
		op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
					    loop_preheader_edge (loop));
	    }

	  /* Create 'vect_ = {op0,op1,...,opn}'.  */
	  number_of_places_left_in_vector--;
	  if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
	    {
	      if (constant_p)
		{
		  op = fold_unary (VIEW_CONVERT_EXPR,
				   TREE_TYPE (vector_type), op);
		  gcc_assert (op && CONSTANT_CLASS_P (op));
		}
	      else
		{
		  tree new_temp
		    = make_ssa_name (TREE_TYPE (vector_type), NULL);
		  gimple init_stmt;
		  op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type),
			       op);
		  init_stmt
		    = gimple_build_assign_with_ops (VIEW_CONVERT_EXPR,
						    new_temp, op, NULL_TREE);
		  gimple_seq_add_stmt (&ctor_seq, init_stmt);
		  op = new_temp;
		}
	    }
	  elts[number_of_places_left_in_vector] = op;

	  if (number_of_places_left_in_vector == 0)
	    {
	      number_of_places_left_in_vector = nunits;

	      if (constant_p)
		vec_cst = build_vector (vector_type, elts);
	      else
		{
		  vec<constructor_elt, va_gc> *v;
		  unsigned k;
		  vec_alloc (v, nunits);
		  for (k = 0; k < nunits; ++k)
		    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
		  vec_cst = build_constructor (vector_type, v);
		}
	      voprnds.quick_push (vect_init_vector (stmt, vec_cst,
						    vector_type, NULL));
	      if (ctor_seq != NULL)
		{
		  gimple init_stmt = SSA_NAME_DEF_STMT (voprnds.last ());
		  gimple_stmt_iterator gsi = gsi_for_stmt (init_stmt);
		  gsi_insert_seq_before_without_update (&gsi, ctor_seq,
							GSI_SAME_STMT);
		  ctor_seq = NULL;
		}
	    }
	}
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  while (number_of_vectors > vec_oprnds->length ())
    {
      tree neutral_vec = NULL;

      if (neutral_op)
	{
	  if (!neutral_vec)
	    neutral_vec = build_vector_from_val (vector_type, neutral_op);

	  vec_oprnds->quick_push (neutral_vec);
	}
      else
	{
	  for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
	    vec_oprnds->quick_push (vop);
	}
    }
}
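
/* E.g., if NUMBER_OF_VECTORS is 4 but only two vectors were created above,
   the replication loop at the end pushes two more: copies of the existing
   vectors for ordinary operands, or vectors of the neutral element for
   reductions.  */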

/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
{
  tree vec_oprnd;
  gimple vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());

  FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      vec_oprnds->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the corresponding child of SLP_NODE, and we call
   vect_get_slp_vect_defs () to retrieve them.  */

void
vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
		   vec<slp_void_p> *vec_oprnds, int reduc_index)
{
  gimple first_stmt, first_def;
  int number_of_vects = 0, i;
  unsigned int child_index = 0;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
  slp_tree child = NULL;
  vec<tree> *vec_defs;
  tree oprnd, def_lhs;
  bool vectorized_defs;

  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
  FOR_EACH_VEC_ELT (ops, i, oprnd)
    {
      /* For each operand we check if it has vectorized definitions in a child
	 node or we need to create them (for invariants and constants).  We
	 check if the LHS of the first stmt of the next child matches OPRND.
	 If it does, we found the correct child.  Otherwise, we call
	 vect_get_constant_vectors (), and do not advance CHILD_INDEX in order
	 to check this child node for the next operand.  */
      vectorized_defs = false;
      if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
	{
	  child = (slp_tree) SLP_TREE_CHILDREN (slp_node)[child_index];
	  first_def = SLP_TREE_SCALAR_STMTS (child)[0];

	  /* At the end of a pattern sequence we have a use of the original
	     stmt, so we need to compare OPRND with the original def.  */
	  if (is_pattern_stmt_p (vinfo_for_stmt (first_def))
	      && !STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first_stmt))
	      && !is_pattern_stmt_p (vinfo_for_stmt (first_stmt)))
	    first_def = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));

	  if (is_gimple_call (first_def))
	    def_lhs = gimple_call_lhs (first_def);
	  else
	    def_lhs = gimple_assign_lhs (first_def);

	  if (operand_equal_p (oprnd, def_lhs, 0))
	    {
	      /* The number of vector defs is determined by the number of
		 vector statements in the node from which we get those
		 statements.  */
	      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
	      vectorized_defs = true;
	      child_index++;
	    }
	}

      if (!vectorized_defs)
	{
	  if (i == 0)
	    {
	      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	      /* Number of vector stmts was calculated according to LHS in
		 vect_schedule_slp_instance (), fix it by replacing LHS with
		 RHS, if necessary.  See vect_get_smallest_scalar_type () for
		 details.  */
	      vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
					     &rhs_size_unit);
	      if (rhs_size_unit != lhs_size_unit)
		{
		  number_of_vects *= rhs_size_unit;
		  number_of_vects /= lhs_size_unit;
		}
	    }
	}

      /* Allocate memory for vectorized defs.  */
      vec_alloc (vec_defs, number_of_vects);

      /* For reduction defs we call vect_get_constant_vectors (), since we are
	 looking for initial loop invariant values.  */
      if (vectorized_defs && reduc_index == -1)
	/* The defs are already vectorized.  */
	vect_get_slp_vect_defs (child, vec_defs);
      else
	/* Build vectors from scalar defs.  */
	vect_get_constant_vectors (oprnd, slp_node, vec_defs, i,
				   number_of_vects, reduc_index);

      vec_oprnds->quick_push ((slp_void_p) vec_defs);

      /* For reductions, we only need initial values.  */
      if (reduc_index != -1)
	return;
    }
}
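
/* As an illustration (hypothetical scalar code): for a group of stmts of the
   form a[i] = b[i] + 5, the operand vector of the plus node is { _b, 5 },
   where _b is the SSA name loaded from b[i].  _b matches the LHS of the
   first scalar stmt of the load child, so its vector defs come from
   vect_get_slp_vect_defs (), while the invariant 5 has no child and goes
   through vect_get_constant_vectors ().  */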

/* Create NCOPIES permutation statements using the mask MASK (a vector of
   type MASK_TYPE) and two input vectors placed in DR_CHAIN at FIRST_VEC_INDX
   and SECOND_VEC_INDX for the first copy, shifting by STRIDE elements of
   DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE
   where the created stmts must be inserted.  */

static inline void
vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
			   tree mask, int first_vec_indx, int second_vec_indx,
			   gimple_stmt_iterator *gsi, slp_tree node,
			   tree vectype, vec<tree> dr_chain,
			   int ncopies, int vect_stmts_counter)
{
  tree perm_dest;
  gimple perm_stmt = NULL;
  stmt_vec_info next_stmt_info;
  int i, stride;
  tree first_vec, second_vec, data_ref;

  stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;

  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
  for (i = SLP_TREE_VEC_STMTS (node).length ();
       i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
    SLP_TREE_VEC_STMTS (node).quick_push (NULL);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
  for (i = 0; i < ncopies; i++)
    {
      first_vec = dr_chain[first_vec_indx];
      second_vec = dr_chain[second_vec_indx];

      /* Generate the permute statement.  */
      perm_stmt = gimple_build_assign_with_ops (VEC_PERM_EXPR, perm_dest,
						first_vec, second_vec, mask);
      data_ref = make_ssa_name (perm_dest, perm_stmt);
      gimple_set_lhs (perm_stmt, data_ref);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);

      /* Store the vector statement in NODE.  */
      SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;

      first_vec_indx += stride;
      second_vec_indx += stride;
    }

  /* Mark the scalar stmt as vectorized.  */
  next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
  STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
}
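
/* E.g., if NODE has 8 vector stmts and NCOPIES is 2, STRIDE is 4: copy 0 of
   the permutation for mask slot K lands in SLP_TREE_VEC_STMTS slot K and
   copy 1 in slot K + 4, with the input indices into DR_CHAIN advanced by 4
   between the copies.  */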

/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
   return in CURRENT_MASK_ELEMENT its equivalent in target specific
   representation.  Check that the mask is valid and return FALSE if not.
   Return TRUE in NEED_NEXT_VECTOR if the permutation requires moving to
   the next vector, i.e., the current first vector is not needed.  */

static bool
vect_get_mask_element (gimple stmt, int first_mask_element, int m,
		       int mask_nunits, bool only_one_vec, int index,
		       unsigned char *mask, int *current_mask_element,
		       bool *need_next_vector, int *number_of_mask_fixes,
		       bool *mask_fixed, bool *needs_first_vector)
{
  int i;

  /* Convert to target specific representation.  */
  *current_mask_element = first_mask_element + m;
  /* Adjust the value in case it's a mask for second and third vectors.  */
  *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);

  if (*current_mask_element < mask_nunits)
    *needs_first_vector = true;

  /* We have only one input vector to permute but the mask accesses values in
     the next vector as well.  */
  if (only_one_vec && *current_mask_element >= mask_nunits)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "permutation requires at least two vectors ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  /* The mask requires the next vector.  */
  if (*current_mask_element >= mask_nunits * 2)
    {
      if (*needs_first_vector || *mask_fixed)
	{
	  /* We either need the first vector too or have already moved to the
	     next vector.  In both cases, this permutation needs three
	     vectors.  */
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "permutation requires at "
			       "least three vectors ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	    }

	  return false;
	}

      /* We move to the next vector, dropping the first one and working with
	 the second and the third - we need to adjust the values of the mask
	 accordingly.  */
      *current_mask_element -= mask_nunits * *number_of_mask_fixes;

      for (i = 0; i < index; i++)
	mask[i] -= mask_nunits * *number_of_mask_fixes;

      (*number_of_mask_fixes)++;
      *mask_fixed = true;
    }

  *need_next_vector = *mask_fixed;

  /* This was the last element of this mask.  Start a new one.  */
  if (index == mask_nunits - 1)
    {
      *number_of_mask_fixes = 1;
      *mask_fixed = false;
      *needs_first_vector = false;
    }

  return true;
}
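
/* For example, with MASK_NUNITS == 4, FIRST_MASK_ELEMENT == 9, M == 0 and
   NUMBER_OF_MASK_FIXES == 1, the element converts to 9, which is
   >= 4 * 2: the current first vector is dropped, 4 is subtracted from this
   and all previously recorded mask elements (so 9 becomes 5), and
   NEED_NEXT_VECTOR tells the caller to advance to the next pair of input
   vectors.  */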

/* Generate vector permute statements from a list of loads in DR_CHAIN.
   If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
   permute statements for SLP_NODE_INSTANCE.  */

bool
vect_transform_slp_perm_load (gimple stmt, vec<tree> dr_chain,
			      gimple_stmt_iterator *gsi, int vf,
			      slp_instance slp_node_instance, bool analyze_only)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_element_type = NULL_TREE, mask_type;
  int i, j, k, nunits, vec_index = 0, scalar_index;
  slp_tree node;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  gimple next_scalar_stmt;
  int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
  int first_mask_element;
  int index, unroll_factor, current_mask_element, ncopies;
  unsigned char *mask;
  bool only_one_vec = false, need_next_vector = false;
  int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
  int number_of_mask_fixes = 1;
  bool mask_fixed = false;
  bool needs_first_vector = false;
  enum machine_mode mode;

  mode = TYPE_MODE (vectype);

  if (!can_vec_perm_p (mode, false, NULL))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vect permute for ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}
      return false;
    }

  /* The generic VEC_PERM_EXPR code always uses an integral type of the
     same size as the vector element being permuted.  */
  mask_element_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  mask = XALLOCAVEC (unsigned char, nunits);
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
     unrolling factor.  */
  orig_vec_stmts_num = group_size *
    SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
  if (orig_vec_stmts_num == 1)
    only_one_vec = true;

  /* Number of copies is determined by the final vectorization factor
     relatively to SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* Generate permutation masks for every NODE.  Number of masks for each NODE
     is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4.  I.e., we have an
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     for c's: c0c0c0c1 c1c1c2c2 c2c3c3c3

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
     The last mask is illegal since we assume two operands for permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */
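
  /* In the example above the raw mask elements follow from
     FIRST_MASK_ELEMENT = i + j * GROUP_SIZE: for the a's (i == 0,
     GROUP_SIZE == 3, so the unrolling factor is 4) this yields 0, 3, 6, 9
     for j = 0..3, each emitted GROUP_SIZE times as K iterates over the
     group, and the stream is chopped into NUNITS-sized masks:
     {0,0,0,3} {3,3,6,6} {6,9,9,9}, the last of which is then remapped to
     {2,5,5,5} by vect_get_mask_element ().  */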

  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_node_instance), i, node)
    {
      scalar_index = 0;
      index = 0;
      vect_stmts_counter = 0;
      vec_index = 0;
      first_vec_index = vec_index++;
      if (only_one_vec)
	second_vec_index = first_vec_index;
      else
	second_vec_index = vec_index++;

      for (j = 0; j < unroll_factor; j++)
	{
	  for (k = 0; k < group_size; k++)
	    {
	      first_mask_element = i + j * group_size;
	      if (!vect_get_mask_element (stmt, first_mask_element, 0,
					  nunits, only_one_vec, index,
					  mask, &current_mask_element,
					  &need_next_vector,
					  &number_of_mask_fixes, &mask_fixed,
					  &needs_first_vector))
		return false;
	      mask[index++] = current_mask_element;

	      if (index == nunits)
		{
		  tree mask_vec, *mask_elts;
		  int l;

		  if (!can_vec_perm_p (mode, false, mask))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					   vect_location,
					   "unsupported vect permute { ");
			  for (i = 0; i < nunits; ++i)
			    dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
					 mask[i]);
			  dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
			}
		      return false;
		    }

		  mask_elts = XALLOCAVEC (tree, nunits);
		  for (l = 0; l < nunits; ++l)
		    mask_elts[l] = build_int_cst (mask_element_type, mask[l]);
		  mask_vec = build_vector (mask_type, mask_elts);
		  index = 0;

		  if (!analyze_only)
		    {
		      if (need_next_vector)
			{
			  first_vec_index = second_vec_index;
			  second_vec_index = vec_index;
			}

		      next_scalar_stmt
			= SLP_TREE_SCALAR_STMTS (node)[scalar_index++];

		      vect_create_mask_and_perm (stmt, next_scalar_stmt,
			       mask_vec, first_vec_index, second_vec_index,
			       gsi, node, vectype, dr_chain,
			       ncopies, vect_stmts_counter++);
		    }
		}
	    }
	}
    }

  return true;
}

/* Vectorize SLP instance tree in postorder.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
			    unsigned int vectorization_factor)
{
  gimple stmt;
  bool grouped_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i;
  slp_tree loads_node;
  slp_void_p child;

  if (!node)
    return false;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_schedule_slp_instance ((slp_tree) child, instance,
				vectorization_factor);

  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  Number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
     size.  */
  vec_stmts_size = (vectorization_factor * group_size) / nunits;
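
  /* E.g., with GROUP_SIZE == 2, VECTORIZATION_FACTOR == 8 and NUNITS == 4,
     each node needs (8 * 2) / 4 == 4 vector stmts to cover all unrolled
     copies of its two scalar stmts.  */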

  /* In case of load permutation we have to allocate vectorized statements for
     all the nodes that participate in that permutation.  */
  if (SLP_INSTANCE_LOAD_PERMUTATION (instance).exists ())
    {
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, loads_node)
	{
	  if (!SLP_TREE_VEC_STMTS (loads_node).exists ())
	    {
	      SLP_TREE_VEC_STMTS (loads_node).create (vec_stmts_size);
	      SLP_TREE_NUMBER_OF_VEC_STMTS (loads_node) = vec_stmts_size;
	    }
	}
    }

  if (!SLP_TREE_VEC_STMTS (node).exists ())
    {
      SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "------>vectorizing SLP node starting from: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* Loads should be inserted before the first load.  */
  if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
      && STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && !REFERENCE_CLASS_P (gimple_get_lhs (stmt))
      && SLP_INSTANCE_LOAD_PERMUTATION (instance).exists ())
    si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
  else if (is_pattern_stmt_p (stmt_info))
    si = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    si = gsi_for_stmt (stmt);

  /* Stores should be inserted just before the last store.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
    {
      gimple last_store = vect_find_last_store_in_slp_instance (instance);
      if (is_pattern_stmt_p (vinfo_for_stmt (last_store)))
	last_store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (last_store));
      si = gsi_for_stmt (last_store);
    }

  /* Mark the first element of the reduction chain as reduction to properly
     transform the node.  In the analysis phase only the last element of the
     chain is marked as reduction.  */
  if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
    {
      STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
    }

  is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
  return is_store;
}

/* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
   For loop vectorization this is done in vectorizable_call, but for SLP
   it needs to be deferred until end of vect_schedule_slp, because multiple
   SLP instances may refer to the same scalar stmt.  */

static void
vect_remove_slp_scalar_calls (slp_tree node)
{
  gimple stmt, new_stmt;
  gimple_stmt_iterator gsi;
  int i;
  slp_void_p child;
  tree lhs;
  stmt_vec_info stmt_info;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_remove_slp_scalar_calls ((slp_tree) child);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
	continue;
      stmt_info = vinfo_for_stmt (stmt);
      if (stmt_info == NULL
	  || is_pattern_stmt_p (stmt_info)
	  || !PURE_SLP_STMT (stmt_info))
	continue;
      lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi = gsi_for_stmt (stmt);
      gsi_replace (&gsi, new_stmt, false);
      SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
    }
}
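
/* For instance (hypothetical scalar code): if SLP vectorization consumed the
   call in "x_1 = foo (a_2);", the dead scalar call is replaced by "x_1 = 0;"
   here.  This keeps the IL and the SSA def of x_1 valid until DCE removes
   it, rather than deleting a stmt that other SLP instances may still refer
   to.  */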

/* Generate vector code for all SLP instances in the loop/basic block.  */

bool
vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  unsigned int i, vf;
  bool is_store = false;

  if (loop_vinfo)
    {
      slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    {
      slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
      vf = 1;
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
					     instance, vf);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vectorizing stmts using SLP.");
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple store;
      unsigned int j;
      gimple_stmt_iterator gsi;

      vect_remove_slp_scalar_calls (root);

      for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
		  && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
	{
	  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
	    break;

	  if (is_pattern_stmt_p (vinfo_for_stmt (store)))
	    store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
	  /* Free the attached stmt_vec_info and remove the stmt.  */
	  gsi = gsi_for_stmt (store);
	  unlink_stmt_vdef (store);
	  gsi_remove (&gsi, true);
	  release_defs (store);
	  free_stmt_vec_info (store);
	}
    }

  return is_store;
}

/* Vectorize the basic block.  */

void
vect_slp_transform_bb (basic_block bb)
{
  bb_vec_info bb_vinfo = vec_info_for_bb (bb);
  gimple_stmt_iterator si;

  gcc_assert (bb_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "------>SLPing statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);

      /* Schedule all the SLP instances when the first SLP stmt is reached.  */
      if (STMT_SLP_TYPE (stmt_info))
	{
	  vect_schedule_slp (NULL, bb_vinfo);
	  break;
	}
    }

  if (dump_enabled_p ())
    dump_printf (MSG_OPTIMIZED_LOCATIONS, "BASIC BLOCK VECTORIZED\n");

  destroy_bb_vec_info (bb_vinfo);
}