/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "statistics.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location if it succeeds and NULL if not.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  SLP_INSTANCE_BODY_COST_VEC (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}
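
/* As an illustration (a made-up group, not from any particular testcase):
   for the four isomorphic scalar stmts

       a[0] = b[0] + c[0];
       a[1] = b[1] + c[1];
       a[2] = b[2] + c[2];
       a[3] = b[3] + c[3];

   the node created above keeps all four stmts in SLP_TREE_SCALAR_STMTS and
   reserves NOPS == 2 child slots, one for the defs of 'b' and one for the
   defs of 'c'.  */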
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */

static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
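
/* For example, in an interleaving chain over a[0], a[1], a[2], a[3] with no
   gaps, the stmt accessing a[2] is at place 2, while a stmt whose chain does
   not start at FIRST_STMT yields -1.  */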
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */
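
/* As an illustration of the "swap" case (a made-up group, not from any
   particular testcase): if operand 0 of the first stmt of the group is an
   internal def while operand 0 of a later stmt is a constant, but the
   operands taken in the opposite order would match, the mismatch can be
   repaired by commutating the later stmt's operands, so 1 is returned
   rather than the fatal -1.  */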
static int
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             gimple stmt, unsigned stmt_num,
                             vec<slp_oprnd_info> *oprnds_info)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;
  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (is_gimple_call (stmt))
    {
      number_of_oprnds = gimple_call_num_args (stmt);
      first_op_idx = 3;
    }
  else if (is_gimple_assign (stmt))
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        {
          first_op_cond = true;
          commutative = true;
          number_of_oprnds++;
        }
      else
        commutative = commutative_tree_code (code);
    }
  else
    return -1;
287 bool swapped
= false;
288 for (i
= 0; i
< number_of_oprnds
; i
++)
293 if (i
== 0 || i
== 1)
294 oprnd
= TREE_OPERAND (gimple_op (stmt
, first_op_idx
),
297 oprnd
= gimple_op (stmt
, first_op_idx
+ i
- 1);
300 oprnd
= gimple_op (stmt
, first_op_idx
+ (swapped
? !i
: i
));
302 oprnd_info
= (*oprnds_info
)[i
];
304 if (!vect_is_simple_use (oprnd
, NULL
, loop_vinfo
, bb_vinfo
, &def_stmt
,
306 || (!def_stmt
&& dt
!= vect_constant_def
))
308 if (dump_enabled_p ())
310 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
311 "Build SLP failed: can't find def for ");
312 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, oprnd
);
313 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         same pattern.  */
322 if (def_stmt
&& gimple_bb (def_stmt
)
323 && ((loop
&& flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
324 || (!loop
&& gimple_bb (def_stmt
) == BB_VINFO_BB (bb_vinfo
)
325 && gimple_code (def_stmt
) != GIMPLE_PHI
))
326 && vinfo_for_stmt (def_stmt
)
327 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt
))
328 && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt
))
329 && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt
)))
332 if (!first
&& !oprnd_info
->first_pattern
333 /* Allow different pattern state for the defs of the
334 first stmt in reduction chains. */
335 && (oprnd_info
->first_dt
!= vect_reduction_def
336 || (!second
&& !oprnd_info
->second_pattern
)))
346 if (dump_enabled_p ())
348 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
349 "Build SLP failed: some of the stmts"
350 " are in a pattern, and others are not ");
351 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, oprnd
);
352 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
358 def_stmt
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt
));
359 dt
= STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt
));
361 if (dt
== vect_unknown_def_type
)
363 if (dump_enabled_p ())
364 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
365 "Unsupported pattern.\n");
369 switch (gimple_code (def_stmt
))
372 def
= gimple_phi_result (def_stmt
);
376 def
= gimple_assign_lhs (def_stmt
);
380 if (dump_enabled_p ())
381 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
382 "unsupported defining stmt:\n");
388 oprnd_info
->second_pattern
= pattern
;
392 oprnd_info
->first_dt
= dt
;
393 oprnd_info
->first_pattern
= pattern
;
394 oprnd_info
->first_op_type
= TREE_TYPE (oprnd
);
          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
403 if (((oprnd_info
->first_dt
!= dt
404 && !(oprnd_info
->first_dt
== vect_reduction_def
405 && dt
== vect_internal_def
)
406 && !((oprnd_info
->first_dt
== vect_external_def
407 || oprnd_info
->first_dt
== vect_constant_def
)
408 && (dt
== vect_external_def
409 || dt
== vect_constant_def
)))
410 || !types_compatible_p (oprnd_info
->first_op_type
,
413 /* Try swapping operands if we got a mismatch. */
422 if (dump_enabled_p ())
423 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
424 "Build SLP failed: different types\n");
430 /* Check the types of the definitions. */
433 case vect_constant_def
:
434 case vect_external_def
:
435 case vect_reduction_def
:
438 case vect_internal_def
:
439 oprnd_info
->def_stmts
.quick_push (def_stmt
);
443 /* FORNOW: Not supported. */
444 if (dump_enabled_p ())
446 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
447 "Build SLP failed: illegal type of def ");
448 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, def
);
449 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
461 tree cond
= gimple_assign_rhs1 (stmt
);
462 swap_ssa_operands (stmt
, &TREE_OPERAND (cond
, 0),
463 &TREE_OPERAND (cond
, 1));
464 TREE_SET_CODE (cond
, swap_tree_comparison (TREE_CODE (cond
)));
467 swap_ssa_operands (stmt
, gimple_assign_rhs1_ptr (stmt
),
468 gimple_assign_rhs2_ptr (stmt
));
/* Verify if the scalar stmts STMTS are isomorphic, require data
   permutation or are of unsupported types of operation.  Return
   true if they are, otherwise return false and indicate in *MATCHES
   which stmts are not isomorphic to the first one.  If MATCHES[0]
   is false then this indicates the comparison could not be
   carried out or the stmts will never be vectorized by SLP.  */
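
/* For example, the stmts a[0] = b[0] * c[0]; a[1] = b[1] * c[1];
   a[2] = b[2] * c[2]; a[3] = b[3] * c[3]; are isomorphic (the same operation
   on compatible types), whereas replacing the last one by a[3] = b[3] + c[3]
   would leave MATCHES[3] false.  */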
static bool
vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       vec<gimple> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       unsigned int vectorization_factor, bool *matches,
                       bool *two_operators)
{
  unsigned int i;
  tree lhs, cond;
  HOST_WIDE_INT dummy;
  gimple first_stmt = stmts[0], stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  bool need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  struct data_reference *first_dr;
  gimple first_load = NULL, prev_first_load = NULL, old_first_load = NULL;
507 /* For every stmt in NODE find its def stmt/s. */
508 FOR_EACH_VEC_ELT (stmts
, i
, stmt
)
512 if (dump_enabled_p ())
514 dump_printf_loc (MSG_NOTE
, vect_location
, "Build SLP for ");
515 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
516 dump_printf (MSG_NOTE
, "\n");
519 /* Fail to vectorize statements marked as unvectorizable. */
520 if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt
)))
522 if (dump_enabled_p ())
524 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
525 "Build SLP failed: unvectorizable statement ");
526 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
527 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
529 /* Fatal mismatch. */
534 lhs
= gimple_get_lhs (stmt
);
535 if (lhs
== NULL_TREE
)
537 if (dump_enabled_p ())
539 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
540 "Build SLP failed: not GIMPLE_ASSIGN nor "
542 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
543 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
545 /* Fatal mismatch. */
550 if (is_gimple_assign (stmt
)
551 && gimple_assign_rhs_code (stmt
) == COND_EXPR
552 && (cond
= gimple_assign_rhs1 (stmt
))
553 && !COMPARISON_CLASS_P (cond
))
555 if (dump_enabled_p ())
557 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
558 "Build SLP failed: condition is not "
560 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
561 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
563 /* Fatal mismatch. */
568 scalar_type
= vect_get_smallest_scalar_type (stmt
, &dummy
, &dummy
);
569 vectype
= get_vectype_for_scalar_type (scalar_type
);
572 if (dump_enabled_p ())
574 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
575 "Build SLP failed: unsupported data-type ");
576 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
578 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
580 /* Fatal mismatch. */
585 /* If populating the vector type requires unrolling then fail
586 before adjusting *max_nunits for basic-block vectorization. */
588 && TYPE_VECTOR_SUBPARTS (vectype
) > group_size
)
590 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
591 "Build SLP failed: unrolling required "
592 "in basic block SLP\n");
593 /* Fatal mismatch. */
598 /* In case of multiple types we need to detect the smallest type. */
599 if (*max_nunits
< TYPE_VECTOR_SUBPARTS (vectype
))
601 *max_nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
603 vectorization_factor
= *max_nunits
;
606 if (gcall
*call_stmt
= dyn_cast
<gcall
*> (stmt
))
608 rhs_code
= CALL_EXPR
;
609 if (gimple_call_internal_p (call_stmt
)
610 || gimple_call_tail_p (call_stmt
)
611 || gimple_call_noreturn_p (call_stmt
)
612 || !gimple_call_nothrow_p (call_stmt
)
613 || gimple_call_chain (call_stmt
))
615 if (dump_enabled_p ())
617 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
618 "Build SLP failed: unsupported call type ");
619 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
621 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
623 /* Fatal mismatch. */
629 rhs_code
= gimple_assign_rhs_code (stmt
);
631 /* Check the operation. */
634 first_stmt_code
= rhs_code
;
636 /* Shift arguments should be equal in all the packed stmts for a
637 vector shift with scalar shift operand. */
638 if (rhs_code
== LSHIFT_EXPR
|| rhs_code
== RSHIFT_EXPR
639 || rhs_code
== LROTATE_EXPR
640 || rhs_code
== RROTATE_EXPR
)
642 vec_mode
= TYPE_MODE (vectype
);
644 /* First see if we have a vector/vector shift. */
645 optab
= optab_for_tree_code (rhs_code
, vectype
,
649 || optab_handler (optab
, vec_mode
) == CODE_FOR_nothing
)
651 /* No vector/vector shift, try for a vector/scalar shift. */
652 optab
= optab_for_tree_code (rhs_code
, vectype
,
657 if (dump_enabled_p ())
658 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
659 "Build SLP failed: no optab.\n");
660 /* Fatal mismatch. */
664 icode
= (int) optab_handler (optab
, vec_mode
);
665 if (icode
== CODE_FOR_nothing
)
667 if (dump_enabled_p ())
668 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
670 "op not supported by target.\n");
671 /* Fatal mismatch. */
675 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
676 if (!VECTOR_MODE_P (optab_op2_mode
))
678 need_same_oprnds
= true;
679 first_op1
= gimple_assign_rhs2 (stmt
);
683 else if (rhs_code
== WIDEN_LSHIFT_EXPR
)
685 need_same_oprnds
= true;
686 first_op1
= gimple_assign_rhs2 (stmt
);
691 if (first_stmt_code
!= rhs_code
692 && alt_stmt_code
== ERROR_MARK
)
693 alt_stmt_code
= rhs_code
;
694 if (first_stmt_code
!= rhs_code
695 && (first_stmt_code
!= IMAGPART_EXPR
696 || rhs_code
!= REALPART_EXPR
)
697 && (first_stmt_code
!= REALPART_EXPR
698 || rhs_code
!= IMAGPART_EXPR
)
699 /* Handle mismatches in plus/minus by computing both
700 and merging the results. */
701 && !((first_stmt_code
== PLUS_EXPR
702 || first_stmt_code
== MINUS_EXPR
)
703 && (alt_stmt_code
== PLUS_EXPR
704 || alt_stmt_code
== MINUS_EXPR
)
705 && rhs_code
== alt_stmt_code
)
706 && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt
))
707 && (first_stmt_code
== ARRAY_REF
708 || first_stmt_code
== BIT_FIELD_REF
709 || first_stmt_code
== INDIRECT_REF
710 || first_stmt_code
== COMPONENT_REF
711 || first_stmt_code
== MEM_REF
)))
713 if (dump_enabled_p ())
715 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
716 "Build SLP failed: different operation "
718 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
719 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
721 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
729 && !operand_equal_p (first_op1
, gimple_assign_rhs2 (stmt
), 0))
731 if (dump_enabled_p ())
733 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
734 "Build SLP failed: different shift "
736 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
737 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
743 if (rhs_code
== CALL_EXPR
)
745 gimple first_stmt
= stmts
[0];
746 if (gimple_call_num_args (stmt
) != nops
747 || !operand_equal_p (gimple_call_fn (first_stmt
),
748 gimple_call_fn (stmt
), 0)
749 || gimple_call_fntype (first_stmt
)
750 != gimple_call_fntype (stmt
))
752 if (dump_enabled_p ())
754 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
755 "Build SLP failed: different calls in ");
756 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
758 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
766 /* Grouped store or load. */
767 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt
)))
769 if (REFERENCE_CLASS_P (lhs
))
777 unsigned unrolling_factor
778 = least_common_multiple
779 (*max_nunits
, group_size
) / group_size
;
780 /* FORNOW: Check that there is no gap between the loads
781 and no gap between the groups when we need to load
782 multiple groups at once.
783 ??? We should enhance this to only disallow gaps
785 if ((unrolling_factor
> 1
786 && ((GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) == stmt
787 && GROUP_GAP (vinfo_for_stmt (stmt
)) != 0)
788 /* If the group is split up then GROUP_GAP
789 isn't correct here, nor is GROUP_FIRST_ELEMENT. */
790 || GROUP_SIZE (vinfo_for_stmt (stmt
)) > group_size
))
791 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) != stmt
792 && GROUP_GAP (vinfo_for_stmt (stmt
)) != 1))
794 if (dump_enabled_p ())
796 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
797 "Build SLP failed: grouped "
799 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
801 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
803 /* Fatal mismatch. */
808 /* Check that the size of interleaved loads group is not
809 greater than the SLP group size. */
811 = vectorization_factor
/ TYPE_VECTOR_SUBPARTS (vectype
);
813 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) == stmt
814 && ((GROUP_SIZE (vinfo_for_stmt (stmt
))
815 - GROUP_GAP (vinfo_for_stmt (stmt
)))
816 > ncopies
* group_size
))
818 if (dump_enabled_p ())
820 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
821 "Build SLP failed: the number "
822 "of interleaved loads is greater than "
823 "the SLP group size ");
824 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
826 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
828 /* Fatal mismatch. */
833 old_first_load
= first_load
;
834 first_load
= GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
));
837 /* Check that there are no loads from different interleaving
838 chains in the same node. */
839 if (prev_first_load
!= first_load
)
841 if (dump_enabled_p ())
843 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
845 "Build SLP failed: different "
846 "interleaving chains in one node ");
847 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
849 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
856 prev_first_load
= first_load
;
858 /* In some cases a group of loads is just the same load
859 repeated N times. Only analyze its cost once. */
860 if (first_load
== stmt
&& old_first_load
!= first_load
)
862 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
));
863 if (vect_supportable_dr_alignment (first_dr
, false)
864 == dr_unaligned_unsupported
)
866 if (dump_enabled_p ())
868 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
870 "Build SLP failed: unsupported "
872 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
874 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
876 /* Fatal mismatch. */
882 } /* Grouped access. */
885 if (TREE_CODE_CLASS (rhs_code
) == tcc_reference
)
887 /* Not grouped load. */
888 if (dump_enabled_p ())
890 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
891 "Build SLP failed: not grouped load ");
892 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
893 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
896 /* FORNOW: Not grouped loads are not supported. */
897 /* Fatal mismatch. */
902 /* Not memory operation. */
903 if (TREE_CODE_CLASS (rhs_code
) != tcc_binary
904 && TREE_CODE_CLASS (rhs_code
) != tcc_unary
905 && TREE_CODE_CLASS (rhs_code
) != tcc_expression
906 && rhs_code
!= CALL_EXPR
)
908 if (dump_enabled_p ())
910 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
911 "Build SLP failed: operation");
912 dump_printf (MSG_MISSED_OPTIMIZATION
, " unsupported ");
913 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
914 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
916 /* Fatal mismatch. */
921 if (rhs_code
== COND_EXPR
)
923 tree cond_expr
= gimple_assign_rhs1 (stmt
);
926 first_cond_code
= TREE_CODE (cond_expr
);
927 else if (first_cond_code
!= TREE_CODE (cond_expr
))
929 if (dump_enabled_p ())
931 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
932 "Build SLP failed: different"
934 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
936 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
947 for (i
= 0; i
< group_size
; ++i
)
  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
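  /* For example, a mixed add/subtract ("addsub") group such as
         a[0] = b[0] + c[0];
         a[1] = b[1] - c[1];
     is handled by emitting both a vector addition and a vector subtraction
     and blending the lanes, which requires the constant permutation built
     below to be supported by the target.  */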
953 if (alt_stmt_code
!= ERROR_MARK
954 && TREE_CODE_CLASS (alt_stmt_code
) != tcc_reference
)
957 = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype
));
958 for (i
= 0; i
< TYPE_VECTOR_SUBPARTS (vectype
); ++i
)
961 if (gimple_assign_rhs_code (stmts
[i
% group_size
]) == alt_stmt_code
)
962 sel
[i
] += TYPE_VECTOR_SUBPARTS (vectype
);
964 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
966 for (i
= 0; i
< group_size
; ++i
)
967 if (gimple_assign_rhs_code (stmts
[i
]) == alt_stmt_code
)
970 if (dump_enabled_p ())
972 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
973 "Build SLP failed: different operation "
975 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
977 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
979 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
985 *two_operators
= true;
/* Recursively build an SLP tree starting from NODE.
   Fail (and return a value not equal to zero) if def-stmts are not
   isomorphic, require data permutation or are of unsupported types of
   operation.  Otherwise, return 0.
   The value returned is the depth in the SLP tree where a mismatch
   was found.  */
static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     unsigned int vectorization_factor,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)
{
  unsigned nops, i, this_tree_size = 0;
  gimple stmt;
1012 stmt
= SLP_TREE_SCALAR_STMTS (*node
)[0];
1013 if (is_gimple_call (stmt
))
1014 nops
= gimple_call_num_args (stmt
);
1015 else if (is_gimple_assign (stmt
))
1017 nops
= gimple_num_ops (stmt
) - 1;
1018 if (gimple_assign_rhs_code (stmt
) == COND_EXPR
)
1024 bool two_operators
= false;
1025 if (!vect_build_slp_tree_1 (loop_vinfo
, bb_vinfo
,
1026 SLP_TREE_SCALAR_STMTS (*node
), group_size
, nops
,
1027 max_nunits
, vectorization_factor
, matches
,
1030 SLP_TREE_TWO_OPERATORS (*node
) = two_operators
;
1032 /* If the SLP node is a load, terminate the recursion. */
1033 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt
))
1034 && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
))))
1036 loads
->safe_push (*node
);
1040 /* Get at the operands, verifying they are compatible. */
1041 vec
<slp_oprnd_info
> oprnds_info
= vect_create_oprnd_info (nops
, group_size
);
1042 slp_oprnd_info oprnd_info
;
1043 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node
), i
, stmt
)
1045 switch (vect_get_and_check_slp_defs (loop_vinfo
, bb_vinfo
,
1046 stmt
, i
, &oprnds_info
))
1052 vect_free_oprnd_info (oprnds_info
);
1059 for (i
= 0; i
< group_size
; ++i
)
1062 vect_free_oprnd_info (oprnds_info
);
1066 stmt
= SLP_TREE_SCALAR_STMTS (*node
)[0];
1068 /* Create SLP_TREE nodes for the definition node/s. */
1069 FOR_EACH_VEC_ELT (oprnds_info
, i
, oprnd_info
)
1072 unsigned old_nloads
= loads
->length ();
1073 unsigned old_max_nunits
= *max_nunits
;
1075 if (oprnd_info
->first_dt
!= vect_internal_def
)
1078 if (++this_tree_size
> max_tree_size
)
1080 vect_free_oprnd_info (oprnds_info
);
1084 child
= vect_create_new_slp_node (oprnd_info
->def_stmts
);
1087 vect_free_oprnd_info (oprnds_info
);
1091 if (vect_build_slp_tree (loop_vinfo
, bb_vinfo
, &child
,
1092 group_size
, max_nunits
, loads
,
1093 vectorization_factor
, matches
,
1094 npermutes
, &this_tree_size
, max_tree_size
))
1096 oprnd_info
->def_stmts
= vNULL
;
1097 SLP_TREE_CHILDREN (*node
).quick_push (child
);
      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ??? This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size.  */
          /* ??? Rejecting patterns this way doesn't work.  We'd have to
             do extra work to cancel the pattern so the uses see the
             scalar stmt.  */
1114 && !is_pattern_stmt_p (vinfo_for_stmt (stmt
)))
1117 slp_tree grandchild
;
1120 *max_nunits
= old_max_nunits
;
1121 loads
->truncate (old_nloads
);
1122 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child
), j
, grandchild
)
1123 vect_free_slp_tree (grandchild
);
1124 SLP_TREE_CHILDREN (child
).truncate (0);
1126 dump_printf_loc (MSG_NOTE
, vect_location
,
1127 "Building vector operands from scalars\n");
1128 oprnd_info
->def_stmts
= vNULL
;
1129 vect_free_slp_tree (child
);
1130 SLP_TREE_CHILDREN (*node
).quick_push (NULL
);
1134 /* If the SLP build for operand zero failed and operand zero
1135 and one can be commutated try that for the scalar stmts
1136 that failed the match. */
1138 /* A first scalar stmt mismatch signals a fatal mismatch. */
1140 /* ??? For COND_EXPRs we can swap the comparison operands
1141 as well as the arms under some constraints. */
1143 && oprnds_info
[1]->first_dt
== vect_internal_def
1144 && is_gimple_assign (stmt
)
1145 && commutative_tree_code (gimple_assign_rhs_code (stmt
))
1146 && !SLP_TREE_TWO_OPERATORS (*node
)
          /* Do so only if the number of not successful permutes was not more
             than a cut-off as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */
1154 slp_tree grandchild
;
1157 *max_nunits
= old_max_nunits
;
1158 loads
->truncate (old_nloads
);
1159 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child
), j
, grandchild
)
1160 vect_free_slp_tree (grandchild
);
1161 SLP_TREE_CHILDREN (child
).truncate (0);
1163 /* Swap mismatched definition stmts. */
1164 dump_printf_loc (MSG_NOTE
, vect_location
,
1165 "Re-trying with swapped operands of stmts ");
1166 for (j
= 0; j
< group_size
; ++j
)
1169 gimple tem
= oprnds_info
[0]->def_stmts
[j
];
1170 oprnds_info
[0]->def_stmts
[j
] = oprnds_info
[1]->def_stmts
[j
];
1171 oprnds_info
[1]->def_stmts
[j
] = tem
;
1172 dump_printf (MSG_NOTE
, "%d ", j
);
1174 dump_printf (MSG_NOTE
, "\n");
1175 /* And try again with scratch 'matches' ... */
1176 bool *tem
= XALLOCAVEC (bool, group_size
);
1177 if (vect_build_slp_tree (loop_vinfo
, bb_vinfo
, &child
,
1178 group_size
, max_nunits
, loads
,
1179 vectorization_factor
,
1180 tem
, npermutes
, &this_tree_size
,
1183 /* ... so if successful we can apply the operand swapping
1184 to the GIMPLE IL. This is necessary because for example
1185 vect_get_slp_defs uses operand indexes and thus expects
1186 canonical operand order. */
1187 for (j
= 0; j
< group_size
; ++j
)
1190 gimple stmt
= SLP_TREE_SCALAR_STMTS (*node
)[j
];
1191 swap_ssa_operands (stmt
, gimple_assign_rhs1_ptr (stmt
),
1192 gimple_assign_rhs2_ptr (stmt
));
1194 oprnd_info
->def_stmts
= vNULL
;
1195 SLP_TREE_CHILDREN (*node
).quick_push (child
);
1202 oprnd_info
->def_stmts
= vNULL
;
1203 vect_free_slp_tree (child
);
1204 vect_free_oprnd_info (oprnds_info
);
1209 *tree_size
+= this_tree_size
;
1211 vect_free_oprnd_info (oprnds_info
);
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
1227 dump_printf (dump_kind
, "node ");
1228 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), i
, stmt
)
1230 dump_printf (dump_kind
, "\n\tstmt %d ", i
);
1231 dump_gimple_stmt (dump_kind
, TDF_SLIM
, stmt
, 0);
1233 dump_printf (dump_kind
, "\n");
1235 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), i
, child
)
1236 vect_print_slp_tree (dump_kind
, child
);
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
1255 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), i
, stmt
)
1256 if (j
< 0 || i
== j
)
1257 STMT_SLP_TYPE (vinfo_for_stmt (stmt
)) = mark
;
1259 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), i
, child
)
1260 vect_mark_slp_stmts (child
, mark
, j
);
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
1271 stmt_vec_info stmt_info
;
1277 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), i
, stmt
)
1279 stmt_info
= vinfo_for_stmt (stmt
);
1280 gcc_assert (!STMT_VINFO_RELEVANT (stmt_info
)
1281 || STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_scope
);
1282 STMT_VINFO_RELEVANT (stmt_info
) = vect_used_in_scope
;
1285 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), i
, child
)
1286 vect_mark_slp_stmts_relevant (child
);
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
1297 vec
<gimple
> tmp_stmts
;
1301 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), i
, child
)
1302 vect_slp_rearrange_stmts (child
, group_size
, permutation
);
1304 gcc_assert (group_size
== SLP_TREE_SCALAR_STMTS (node
).length ());
1305 tmp_stmts
.create (group_size
);
1306 tmp_stmts
.quick_grow_cleared (group_size
);
1308 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), i
, stmt
)
1309 tmp_stmts
[permutation
[i
]] = stmt
;
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
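
/* For example, if a load node contains the scalar loads of a[1], a[0],
   a[3], a[2] while the interleaving chain starts at a[0], the recorded
   load permutation is { 1, 0, 3, 2 }; it must either be supported by
   vect_transform_slp_perm_load or be removed by rearranging the stmts
   (as vect_supported_load_permutation_p below does for reductions).  */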
/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn)
1322 unsigned int group_size
= SLP_INSTANCE_GROUP_SIZE (slp_instn
);
1323 unsigned int i
, j
, k
, next
;
1326 gimple stmt
, load
, next_load
, first_load
;
1327 struct data_reference
*dr
;
1329 if (dump_enabled_p ())
1331 dump_printf_loc (MSG_NOTE
, vect_location
, "Load permutation ");
1332 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1333 if (node
->load_permutation
.exists ())
1334 FOR_EACH_VEC_ELT (node
->load_permutation
, j
, next
)
1335 dump_printf (MSG_NOTE
, "%d ", next
);
1337 for (k
= 0; k
< group_size
; ++k
)
1338 dump_printf (MSG_NOTE
, "%d ", k
);
1339 dump_printf (MSG_NOTE
, "\n");
1342 /* In case of reduction every load permutation is allowed, since the order
1343 of the reduction statements is not important (as opposed to the case of
1344 grouped stores). The only condition we need to check is that all the
1345 load nodes are of the same size and have the same permutation (and then
1346 rearrange all the nodes of the SLP instance according to this
1349 /* Check that all the load nodes are of the same size. */
1350 /* ??? Can't we assert this? */
1351 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1352 if (SLP_TREE_SCALAR_STMTS (node
).length () != (unsigned) group_size
)
1355 node
= SLP_INSTANCE_TREE (slp_instn
);
1356 stmt
= SLP_TREE_SCALAR_STMTS (node
)[0];
1358 /* Reduction (there are no data-refs in the root).
1359 In reduction chain the order of the loads is important. */
1360 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
))
1361 && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
1366 /* Compare all the permutation sequences to the first one. We know
1367 that at least one load is permuted. */
1368 node
= SLP_INSTANCE_LOADS (slp_instn
)[0];
1369 if (!node
->load_permutation
.exists ())
1371 for (i
= 1; SLP_INSTANCE_LOADS (slp_instn
).iterate (i
, &load
); ++i
)
1373 if (!load
->load_permutation
.exists ())
1375 FOR_EACH_VEC_ELT (load
->load_permutation
, j
, lidx
)
1376 if (lidx
!= node
->load_permutation
[j
])
1380 /* Check that the loads in the first sequence are different and there
1381 are no gaps between them. */
1382 load_index
= sbitmap_alloc (group_size
);
1383 bitmap_clear (load_index
);
1384 FOR_EACH_VEC_ELT (node
->load_permutation
, i
, lidx
)
1386 if (bitmap_bit_p (load_index
, lidx
))
1388 sbitmap_free (load_index
);
1391 bitmap_set_bit (load_index
, lidx
);
1393 for (i
= 0; i
< group_size
; i
++)
1394 if (!bitmap_bit_p (load_index
, i
))
1396 sbitmap_free (load_index
);
1399 sbitmap_free (load_index
);
1401 /* This permutation is valid for reduction. Since the order of the
1402 statements in the nodes is not important unless they are memory
1403 accesses, we can rearrange the statements in all the nodes
1404 according to the order of the loads. */
1405 vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn
), group_size
,
1406 node
->load_permutation
);
1408 /* We are done, no actual permutations need to be generated. */
1409 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1410 SLP_TREE_LOAD_PERMUTATION (node
).release ();
  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
1417 if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt
)))
1419 /* Check whether the loads in an instance form a subchain and thus
1420 no permutation is necessary. */
1421 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1423 if (!SLP_TREE_LOAD_PERMUTATION (node
).exists ())
1425 bool subchain_p
= true;
1427 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), j
, load
)
1429 if (j
!= 0 && next_load
!= load
)
1434 next_load
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (load
));
1437 SLP_TREE_LOAD_PERMUTATION (node
).release ();
1440 /* Verify the permutation can be generated. */
1442 if (!vect_transform_slp_perm_load (node
, tem
, NULL
,
1443 1, slp_instn
, true))
1445 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
1447 "unsupported load permutation\n");
1453 /* Check that the alignment of the first load in every subchain, i.e.,
1454 the first statement in every load node, is supported.
1455 ??? This belongs in alignment checking. */
1456 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1458 first_load
= SLP_TREE_SCALAR_STMTS (node
)[0];
1459 if (first_load
!= GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load
)))
1461 dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load
));
1462 if (vect_supportable_dr_alignment (dr
, false)
1463 == dr_unaligned_unsupported
)
1465 if (dump_enabled_p ())
1467 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
1469 "unsupported unaligned load ");
1470 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
1472 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
1482 /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
1483 GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
1484 well (unless it's reduction). */
1485 if (SLP_INSTANCE_LOADS (slp_instn
).length () != group_size
)
1487 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1488 if (!node
->load_permutation
.exists ())
1491 load_index
= sbitmap_alloc (group_size
);
1492 bitmap_clear (load_index
);
1493 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1495 unsigned int lidx
= node
->load_permutation
[0];
1496 if (bitmap_bit_p (load_index
, lidx
))
1498 sbitmap_free (load_index
);
1501 bitmap_set_bit (load_index
, lidx
);
1502 FOR_EACH_VEC_ELT (node
->load_permutation
, j
, k
)
1505 sbitmap_free (load_index
);
1509 for (i
= 0; i
< group_size
; i
++)
1510 if (!bitmap_bit_p (load_index
, i
))
1512 sbitmap_free (load_index
);
1515 sbitmap_free (load_index
);
1517 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1518 if (node
->load_permutation
.exists ()
1519 && !vect_transform_slp_perm_load
1521 SLP_INSTANCE_UNROLLING_FACTOR (slp_instn
), slp_instn
, true))
/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_scalar_stmt_in_slp (slp_tree node)
1532 gimple last
= NULL
, stmt
;
1534 for (int i
= 0; SLP_TREE_SCALAR_STMTS (node
).iterate (i
, &stmt
); i
++)
1536 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (stmt
);
1537 if (is_pattern_stmt_p (stmt_vinfo
))
1538 last
= get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo
), last
);
1540 last
= get_later_stmt (stmt
, last
);
/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                         slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         unsigned ncopies_for_cost)
1554 stmt_vector_for_cost
*body_cost_vec
= &SLP_INSTANCE_BODY_COST_VEC (instance
);
1559 stmt_vec_info stmt_info
;
1561 unsigned group_size
= SLP_INSTANCE_GROUP_SIZE (instance
);
1563 /* Recurse down the SLP tree. */
1564 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), i
, child
)
1566 vect_analyze_slp_cost_1 (loop_vinfo
, bb_vinfo
,
1567 instance
, child
, prologue_cost_vec
,
1570 /* Look at the first scalar stmt to determine the cost. */
1571 stmt
= SLP_TREE_SCALAR_STMTS (node
)[0];
1572 stmt_info
= vinfo_for_stmt (stmt
);
1573 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1575 if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info
)))
1576 vect_model_store_cost (stmt_info
, ncopies_for_cost
, false,
1577 vect_uninitialized_def
,
1578 node
, prologue_cost_vec
, body_cost_vec
);
1582 gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info
)));
1583 vect_model_load_cost (stmt_info
, ncopies_for_cost
, false,
1584 node
, prologue_cost_vec
, body_cost_vec
);
1585 /* If the load is permuted record the cost for the permutation.
1586 ??? Loads from multiple chains are let through here only
1587 for a single special case involving complex numbers where
1588 in the end no permutation is necessary. */
1589 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), i
, s
)
1590 if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s
))
1591 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info
))
1592 && vect_get_place_in_interleaving_chain
1593 (s
, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info
)) != i
)
1595 record_stmt_cost (body_cost_vec
, group_size
, vec_perm
,
1596 stmt_info
, 0, vect_body
);
1603 record_stmt_cost (body_cost_vec
, ncopies_for_cost
, vector_stmt
,
1604 stmt_info
, 0, vect_body
);
1605 if (SLP_TREE_TWO_OPERATORS (node
))
1607 record_stmt_cost (body_cost_vec
, ncopies_for_cost
, vector_stmt
,
1608 stmt_info
, 0, vect_body
);
1609 record_stmt_cost (body_cost_vec
, ncopies_for_cost
, vec_perm
,
1610 stmt_info
, 0, vect_body
);
1614 /* Scan operands and account for prologue cost of constants/externals.
1615 ??? This over-estimates cost for multiple uses and should be
1617 lhs
= gimple_get_lhs (stmt
);
1618 for (i
= 0; i
< gimple_num_ops (stmt
); ++i
)
1620 tree def
, op
= gimple_op (stmt
, i
);
1622 enum vect_def_type dt
;
1623 if (!op
|| op
== lhs
)
1625 if (vect_is_simple_use (op
, NULL
, loop_vinfo
, bb_vinfo
,
1626 &def_stmt
, &def
, &dt
))
1628 /* Without looking at the actual initializer a vector of
1629 constants can be implemented as load from the constant pool.
1630 ??? We need to pass down stmt_info for a vector type
1631 even if it points to the wrong stmt. */
1632 if (dt
== vect_constant_def
)
1633 record_stmt_cost (prologue_cost_vec
, 1, vector_load
,
1634 stmt_info
, 0, vect_prologue
);
1635 else if (dt
== vect_external_def
)
1636 record_stmt_cost (prologue_cost_vec
, 1, vec_construct
,
1637 stmt_info
, 0, vect_prologue
);
/* Compute the cost for the SLP instance INSTANCE.  */

static void
vect_analyze_slp_cost (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                       slp_instance instance, unsigned nunits)
1648 stmt_vector_for_cost body_cost_vec
, prologue_cost_vec
;
1649 unsigned ncopies_for_cost
;
1650 stmt_info_for_cost
*si
;
  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;
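  /* E.g. GROUP_SIZE == 4 with NUNITS == 8 gives
     least_common_multiple (8, 4) / 8 == 1 copy, while GROUP_SIZE == 16 with
     NUNITS == 8 gives 2 copies.  */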
1659 prologue_cost_vec
.create (10);
1660 body_cost_vec
.create (10);
1661 SLP_INSTANCE_BODY_COST_VEC (instance
) = body_cost_vec
;
1662 vect_analyze_slp_cost_1 (loop_vinfo
, bb_vinfo
,
1663 instance
, SLP_INSTANCE_TREE (instance
),
1664 &prologue_cost_vec
, ncopies_for_cost
);
1666 /* Record the prologue costs, which were delayed until we were
1667 sure that SLP was successful. Unlike the body costs, we know
1668 the final values now regardless of the loop vectorization factor. */
1669 void *data
= (loop_vinfo
? LOOP_VINFO_TARGET_COST_DATA (loop_vinfo
)
1670 : BB_VINFO_TARGET_COST_DATA (bb_vinfo
));
1671 FOR_EACH_VEC_ELT (prologue_cost_vec
, i
, si
)
1673 struct _stmt_vec_info
*stmt_info
1674 = si
->stmt
? vinfo_for_stmt (si
->stmt
) : NULL
;
1675 (void) add_stmt_cost (data
, si
->count
, si
->kind
, stmt_info
,
1676 si
->misalign
, vect_prologue
);
1679 prologue_cost_vec
.release ();
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt, unsigned max_tree_size)
1690 slp_instance new_instance
;
1692 unsigned int group_size
= GROUP_SIZE (vinfo_for_stmt (stmt
));
1693 unsigned int unrolling_factor
= 1, nunits
;
1694 tree vectype
, scalar_type
= NULL_TREE
;
1696 unsigned int vectorization_factor
= 0;
1698 unsigned int max_nunits
= 0;
1699 vec
<slp_tree
> loads
;
1700 struct data_reference
*dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
));
1701 vec
<gimple
> scalar_stmts
;
1703 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
1707 scalar_type
= TREE_TYPE (DR_REF (dr
));
1708 vectype
= get_vectype_for_scalar_type (scalar_type
);
1712 gcc_assert (loop_vinfo
);
1713 vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
1716 group_size
= GROUP_SIZE (vinfo_for_stmt (stmt
));
1720 gcc_assert (loop_vinfo
);
1721 vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
1722 group_size
= LOOP_VINFO_REDUCTIONS (loop_vinfo
).length ();
1727 if (dump_enabled_p ())
1729 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1730 "Build SLP failed: unsupported data-type ");
1731 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, scalar_type
);
1732 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
1738 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1740 vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
1742 vectorization_factor
= nunits
;
  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
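  /* E.g. a group of 4 stores with 8 units per vector needs the group
     unrolled twice: least_common_multiple (8, 4) / 4 == 2.  Such unrolling
     is not possible for basic-block SLP and is rejected just below.  */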
1746 if (unrolling_factor
!= 1 && !loop_vinfo
)
1748 if (dump_enabled_p ())
1749 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1750 "Build SLP failed: unrolling required in basic"
1756 /* Create a node (a root of the SLP tree) for the packed grouped stores. */
1757 scalar_stmts
.create (group_size
);
1759 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
1761 /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
1764 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next
))
1765 && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next
)))
1766 scalar_stmts
.safe_push (
1767 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next
)));
1769 scalar_stmts
.safe_push (next
);
1770 next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next
));
1775 /* Collect reduction statements. */
1776 vec
<gimple
> reductions
= LOOP_VINFO_REDUCTIONS (loop_vinfo
);
1777 for (i
= 0; reductions
.iterate (i
, &next
); i
++)
1778 scalar_stmts
.safe_push (next
);
1781 node
= vect_create_new_slp_node (scalar_stmts
);
1783 loads
.create (group_size
);
1785 /* Build the tree for the SLP instance. */
1786 bool *matches
= XALLOCAVEC (bool, group_size
);
1787 unsigned npermutes
= 0;
1788 if (vect_build_slp_tree (loop_vinfo
, bb_vinfo
, &node
, group_size
,
1789 &max_nunits
, &loads
,
1790 vectorization_factor
, matches
, &npermutes
, NULL
,
1793 /* Calculate the unrolling factor based on the smallest type. */
1794 if (max_nunits
> nunits
)
1795 unrolling_factor
= least_common_multiple (max_nunits
, group_size
)
1798 if (unrolling_factor
!= 1 && !loop_vinfo
)
1800 if (dump_enabled_p ())
1801 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1802 "Build SLP failed: unrolling required in basic"
1804 vect_free_slp_tree (node
);
1809 /* Create a new SLP instance. */
1810 new_instance
= XNEW (struct _slp_instance
);
1811 SLP_INSTANCE_TREE (new_instance
) = node
;
1812 SLP_INSTANCE_GROUP_SIZE (new_instance
) = group_size
;
1813 SLP_INSTANCE_UNROLLING_FACTOR (new_instance
) = unrolling_factor
;
1814 SLP_INSTANCE_BODY_COST_VEC (new_instance
) = vNULL
;
1815 SLP_INSTANCE_LOADS (new_instance
) = loads
;
1817 /* Compute the load permutation. */
1819 bool loads_permuted
= false;
1820 FOR_EACH_VEC_ELT (loads
, i
, load_node
)
1822 vec
<unsigned> load_permutation
;
1824 gimple load
, first_stmt
;
1825 bool this_load_permuted
= false;
1826 load_permutation
.create (group_size
);
1827 first_stmt
= GROUP_FIRST_ELEMENT
1828 (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node
)[0]));
1829 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node
), j
, load
)
1832 = vect_get_place_in_interleaving_chain (load
, first_stmt
);
1833 gcc_assert (load_place
!= -1);
1834 if (load_place
!= j
)
1835 this_load_permuted
= true;
1836 load_permutation
.safe_push (load_place
);
1838 if (!this_load_permuted
)
1840 load_permutation
.release ();
1843 SLP_TREE_LOAD_PERMUTATION (load_node
) = load_permutation
;
1844 loads_permuted
= true;
1849 if (!vect_supported_load_permutation_p (new_instance
))
1851 if (dump_enabled_p ())
1853 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1854 "Build SLP failed: unsupported load "
1856 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
1857 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
1859 vect_free_slp_instance (new_instance
);
1867 /* Compute the costs of this SLP instance. Delay this for BB
1868 vectorization as we don't have vector types computed yet. */
1869 vect_analyze_slp_cost (loop_vinfo
, bb_vinfo
,
1870 new_instance
, TYPE_VECTOR_SUBPARTS (vectype
));
1871 LOOP_VINFO_SLP_INSTANCES (loop_vinfo
).safe_push (new_instance
);
1874 BB_VINFO_SLP_INSTANCES (bb_vinfo
).safe_push (new_instance
);
1876 if (dump_enabled_p ())
1877 vect_print_slp_tree (MSG_NOTE
, node
);
1882 /* Failed to SLP. */
1883 /* Free the allocated memory. */
1884 vect_free_slp_tree (node
);
/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                  unsigned max_tree_size)
1899 vec
<gimple
> grouped_stores
;
1900 vec
<gimple
> reductions
= vNULL
;
1901 vec
<gimple
> reduc_chains
= vNULL
;
1902 gimple first_element
;
1905 if (dump_enabled_p ())
1906 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vect_analyze_slp ===\n");
1910 grouped_stores
= LOOP_VINFO_GROUPED_STORES (loop_vinfo
);
1911 reduc_chains
= LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo
);
1912 reductions
= LOOP_VINFO_REDUCTIONS (loop_vinfo
);
1915 grouped_stores
= BB_VINFO_GROUPED_STORES (bb_vinfo
);
1917 /* Find SLP sequences starting from groups of grouped stores. */
1918 FOR_EACH_VEC_ELT (grouped_stores
, i
, first_element
)
1919 if (vect_analyze_slp_instance (loop_vinfo
, bb_vinfo
, first_element
,
1923 if (reduc_chains
.length () > 0)
1925 /* Find SLP sequences starting from reduction chains. */
1926 FOR_EACH_VEC_ELT (reduc_chains
, i
, first_element
)
1927 if (vect_analyze_slp_instance (loop_vinfo
, bb_vinfo
, first_element
,
1933 /* Don't try to vectorize SLP reductions if reduction chain was
1938 /* Find SLP sequences starting from groups of reductions. */
1939 if (reductions
.length () > 1
1940 && vect_analyze_slp_instance (loop_vinfo
, bb_vinfo
, reductions
[0],
/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

bool
vect_make_slp_decision (loop_vec_info loop_vinfo)
1955 unsigned int i
, unrolling_factor
= 1;
1956 vec
<slp_instance
> slp_instances
= LOOP_VINFO_SLP_INSTANCES (loop_vinfo
);
1957 slp_instance instance
;
1958 int decided_to_slp
= 0;
1960 if (dump_enabled_p ())
1961 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vect_make_slp_decision ==="
1964 FOR_EACH_VEC_ELT (slp_instances
, i
, instance
)
1966 /* FORNOW: SLP if you can. */
1967 if (unrolling_factor
< SLP_INSTANCE_UNROLLING_FACTOR (instance
))
1968 unrolling_factor
= SLP_INSTANCE_UNROLLING_FACTOR (instance
);
1970 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1971 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1972 loop-based vectorization. Such stmts will be marked as HYBRID. */
1973 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance
), pure_slp
, -1);
1977 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo
) = unrolling_factor
;
1979 if (decided_to_slp
&& dump_enabled_p ())
1980 dump_printf_loc (MSG_NOTE
, vect_location
,
1981 "Decided to SLP %d instances. Unrolling factor %d\n",
1982 decided_to_slp
, unrolling_factor
);
1984 return (decided_to_slp
> 0);
/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */
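
/* For example, if an SLP instance vectorizes a group of stores but one of
   the feeding stmts also has a scalar use outside any SLP instance (say in
   an address computation or in a reduction that stays scalar), that stmt
   must additionally be vectorized by loop-based vectorization, i.e. be
   marked HYBRID.  */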
static void
vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
1994 gimple stmt
= SLP_TREE_SCALAR_STMTS (node
)[i
];
1995 imm_use_iterator imm_iter
;
1997 stmt_vec_info use_vinfo
, stmt_vinfo
= vinfo_for_stmt (stmt
);
1999 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_vinfo
);
2000 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2003 /* Propagate hybrid down the SLP tree. */
2004 if (stype
== hybrid
)
2006 else if (HYBRID_SLP_STMT (stmt_vinfo
))
2010 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
2011 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo
));
2012 if (TREE_CODE (gimple_op (stmt
, 0)) == SSA_NAME
)
2013 FOR_EACH_IMM_USE_STMT (use_stmt
, imm_iter
, gimple_op (stmt
, 0))
2014 if (gimple_bb (use_stmt
)
2015 && flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
))
2016 && (use_vinfo
= vinfo_for_stmt (use_stmt
))
2017 && !STMT_SLP_TYPE (use_vinfo
)
2018 && (STMT_VINFO_RELEVANT (use_vinfo
)
2019 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo
))
2020 || (STMT_VINFO_IN_PATTERN_P (use_vinfo
)
2021 && STMT_VINFO_RELATED_STMT (use_vinfo
)
2022 && !STMT_SLP_TYPE (vinfo_for_stmt
2023 (STMT_VINFO_RELATED_STMT (use_vinfo
)))))
2024 && !(gimple_code (use_stmt
) == GIMPLE_PHI
2025 && STMT_VINFO_DEF_TYPE (use_vinfo
) == vect_reduction_def
))
2029 if (stype
== hybrid
)
2030 STMT_SLP_TYPE (stmt_vinfo
) = hybrid
;
2032 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), j
, child
)
2034 vect_detect_hybrid_slp_stmts (child
, i
, stype
);
2037 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2040 vect_detect_hybrid_slp_1 (tree
*tp
, int *, void *data
)
2042 walk_stmt_info
*wi
= (walk_stmt_info
*)data
;
2043 struct loop
*loopp
= (struct loop
*)wi
->info
;
2048 if (TREE_CODE (*tp
) == SSA_NAME
2049 && !SSA_NAME_IS_DEFAULT_DEF (*tp
))
2051 gimple def_stmt
= SSA_NAME_DEF_STMT (*tp
);
2052 if (flow_bb_inside_loop_p (loopp
, gimple_bb (def_stmt
))
2053 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt
)))
2054 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt
)) = hybrid
;
2061 vect_detect_hybrid_slp_2 (gimple_stmt_iterator
*gsi
, bool *handled
,
2064 /* If the stmt is in a SLP instance then this isn't a reason
2065 to mark use definitions in other SLP instances as hybrid. */
2066 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi
))) != loop_vect
)
/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
2077 vec
<slp_instance
> slp_instances
= LOOP_VINFO_SLP_INSTANCES (loop_vinfo
);
2078 slp_instance instance
;
2080 if (dump_enabled_p ())
2081 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vect_detect_hybrid_slp ==="
2084 /* First walk all pattern stmt in the loop and mark defs of uses as
2085 hybrid because immediate uses in them are not recorded. */
2086 for (i
= 0; i
< LOOP_VINFO_LOOP (loop_vinfo
)->num_nodes
; ++i
)
2088 basic_block bb
= LOOP_VINFO_BBS (loop_vinfo
)[i
];
2089 for (gimple_stmt_iterator gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
);
2092 gimple stmt
= gsi_stmt (gsi
);
2093 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2094 if (STMT_VINFO_IN_PATTERN_P (stmt_info
))
2097 memset (&wi
, 0, sizeof (wi
));
2098 wi
.info
= LOOP_VINFO_LOOP (loop_vinfo
);
2099 gimple_stmt_iterator gsi2
2100 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
2101 walk_gimple_stmt (&gsi2
, vect_detect_hybrid_slp_2
,
2102 vect_detect_hybrid_slp_1
, &wi
);
2103 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
),
2104 vect_detect_hybrid_slp_2
,
2105 vect_detect_hybrid_slp_1
, &wi
);
2110 /* Then walk the SLP instance trees marking stmts with uses in
2111 non-SLP stmts as hybrid, also propagating hybrid down the
2112 SLP tree, collecting the above info on-the-fly. */
2113 FOR_EACH_VEC_ELT (slp_instances
, i
, instance
)
2115 for (unsigned i
= 0; i
< SLP_INSTANCE_GROUP_SIZE (instance
); ++i
)
2116 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance
),
/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
2128 bb_vec_info res
= NULL
;
2129 gimple_stmt_iterator gsi
;
2131 res
= (bb_vec_info
) xcalloc (1, sizeof (struct _bb_vec_info
));
2132 BB_VINFO_BB (res
) = bb
;
2134 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2136 gimple stmt
= gsi_stmt (gsi
);
2137 gimple_set_uid (stmt
, 0);
2138 set_vinfo_for_stmt (stmt
, new_stmt_vec_info (stmt
, NULL
, res
));
2141 BB_VINFO_GROUPED_STORES (res
).create (10);
2142 BB_VINFO_SLP_INSTANCES (res
).create (2);
2143 BB_VINFO_TARGET_COST_DATA (res
) = init_cost (NULL
);
/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
2156 vec
<slp_instance
> slp_instances
;
2157 slp_instance instance
;
2159 gimple_stmt_iterator si
;
2165 bb
= BB_VINFO_BB (bb_vinfo
);
2167 for (si
= gsi_start_bb (bb
); !gsi_end_p (si
); gsi_next (&si
))
2169 gimple stmt
= gsi_stmt (si
);
2170 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2173 /* Free stmt_vec_info. */
2174 free_stmt_vec_info (stmt
);
2177 vect_destroy_datarefs (NULL
, bb_vinfo
);
2178 free_dependence_relations (BB_VINFO_DDRS (bb_vinfo
));
2179 BB_VINFO_GROUPED_STORES (bb_vinfo
).release ();
2180 slp_instances
= BB_VINFO_SLP_INSTANCES (bb_vinfo
);
2181 FOR_EACH_VEC_ELT (slp_instances
, i
, instance
)
2182 vect_free_slp_instance (instance
);
2183 BB_VINFO_SLP_INSTANCES (bb_vinfo
).release ();
2184 destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo
));
/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
2204 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), i
, child
)
2205 if (!vect_slp_analyze_node_operations (bb_vinfo
, child
))
2208 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), i
, stmt
)
2210 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2211 gcc_assert (stmt_info
);
2212 gcc_assert (PURE_SLP_STMT (stmt_info
));
2214 if (!vect_analyze_stmt (stmt
, &dummy
, node
))
/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);
        }
      else
        i++;
    }

  if (!slp_instances.length ())
    return false;

  return true;
}
/* Compute the scalar cost of the SLP node NODE and its children
   and return it.  Do not account defs that are marked in LIFE and
   update LIFE according to uses of NODE.  */
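/* (Illustrative note, not from the original sources: LIFE has one entry per
   scalar stmt of the node.  If the def of, say, the second scalar stmt is
   also used by a stmt that will not be vectorized (a PHI, a stmt in another
   block, or a non-vectorizable stmt), that entry is set and the scalar stmt
   is treated as staying live, so neither it nor the defs it requires are
   added to the scalar cost; this makes vectorization look relatively more
   expensive.)  */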
static unsigned
vect_bb_slp_scalar_cost (basic_block bb,
                         slp_tree node, vec<bool, va_heap> *life)
{
  unsigned scalar_cost = 0;
  unsigned i;
  gimple stmt;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      unsigned stmt_cost;
      ssa_op_iter op_iter;
      def_operand_p def_p;
      stmt_vec_info stmt_info;

      if ((*life)[i])
        continue;

      /* If there is a non-vectorized use of the defs then the scalar
         stmt is kept live in which case we do not account it or any
         required defs in the SLP children in the scalar cost.  This
         way we make the vectorization more costly when compared to
         the scalar cost.  */
      FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
        {
          imm_use_iterator use_iter;
          gimple use_stmt;
          FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
            if (!is_gimple_debug (use_stmt)
                && (gimple_code (use_stmt) == GIMPLE_PHI
                    || gimple_bb (use_stmt) != bb
                    || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
              {
                (*life)[i] = true;
                BREAK_FROM_IMM_USE_STMT (use_iter);
              }
        }
      if ((*life)[i])
        continue;

      stmt_info = vinfo_for_stmt (stmt);
      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = vect_get_stmt_cost (scalar_load);
          else
            stmt_cost = vect_get_stmt_cost (scalar_store);
        }
      else
        stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (child)
      scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);

  return scalar_cost;
}
/* Check if vectorization of the basic block is profitable.  */
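/* (Illustration with made-up numbers, not from the original sources: with
   vec_prologue_cost = 2, vec_epilogue_cost = 0 and vec_inside_cost = 6, the
   vector cost is 2 + 0 + 6 = 8; if the scalar cost of the block is 12, then
   8 < 12 and the function below reports the vectorization as profitable.)  */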
static bool
vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i, j;
  unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
  unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;
  void *target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);
  stmt_vec_info stmt_info = NULL;
  stmt_vector_for_cost body_cost_vec;
  stmt_info_for_cost *ci;

  /* Calculate vector costs.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);

      FOR_EACH_VEC_ELT (body_cost_vec, j, ci)
        {
          stmt_info = ci->stmt ? vinfo_for_stmt (ci->stmt) : NULL;
          (void) add_stmt_cost (target_cost_data, ci->count, ci->kind,
                                stmt_info, ci->misalign, vect_body);
        }
    }

  /* Calculate scalar cost.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      auto_vec<bool, 20> life;
      life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
      scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
                                              SLP_INSTANCE_TREE (instance),
                                              &life);
    }

  /* Complete the target-specific cost calculation.  */
  finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of basic block cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n", vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n", vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar cost of basic block: %d\n", scalar_cost);
    }

  /* Vectorization is profitable if its cost is less than the cost of scalar
     version.  */
  if (vec_outside_cost + vec_inside_cost >= scalar_cost)
    return false;

  return true;
}
/* Check if the basic block can be vectorized.  */

static bb_vec_info
vect_slp_analyze_bb_1 (basic_block bb)
{
  bb_vec_info bb_vinfo;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  int i;
  int min_vf = 2;
  unsigned n_stmts = 0;

  bb_vinfo = new_bb_vec_info (bb);
  if (!bb_vinfo)
    return NULL;

  if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data-ref in basic "
                         "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: not enough data-refs in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data access in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  vect_pattern_recog (NULL, bb_vinfo);

  if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad data alignment in basic "
                         "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Failed to SLP the basic block.\n");
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: failed to find SLP opportunities "
                           "in basic block.\n");
        }

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

  /* Mark all the statements that we want to vectorize as pure SLP and
     relevant.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
    }

  /* Mark all the statements that we do not want to vectorize.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
       !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
      if (STMT_SLP_TYPE (vinfo) != pure_slp)
        STMT_VINFO_VECTORIZABLE (vinfo) = false;
    }

  /* Analyze dependences.  At this point all stmts not participating in
     vectorization have to be marked.  Dependence analysis assumes
     that we either vectorize all SLP instances or none at all.  */
  if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data dependence "
                         "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported alignment in basic "
                         "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_slp_analyze_operations (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad operation in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Compute the costs of the SLP instances.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      gimple stmt = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
      tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      vect_analyze_slp_cost (NULL, bb_vinfo,
                             instance, TYPE_VECTOR_SUBPARTS (vectype));
    }

  /* Cost model: check if the vectorization is worthwhile.  */
  if (!unlimited_cost_model (NULL)
      && !vect_bb_vectorization_profitable_p (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization is not "
                         "profitable.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Basic block will be vectorized using SLP\n");

  return bb_vinfo;
}
bb_vec_info
vect_slp_analyze_bb (basic_block bb)
{
  bb_vec_info bb_vinfo;
  int insns = 0;
  gimple_stmt_iterator gsi;
  unsigned int vector_sizes;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt)
          && !gimple_nop_p (stmt)
          && gimple_code (stmt) != GIMPLE_LABEL)
        insns++;
    }

  if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: too many instructions in "
                         "basic block.\n");

      return NULL;
    }

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  while (1)
    {
      bb_vinfo = vect_slp_analyze_bb_1 (bb);
      if (bb_vinfo)
        return bb_vinfo;

      destroy_bb_vec_info (bb_vinfo);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)
        return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "***** Re-trying analysis with "
                         "vector size %d\n", current_vector_size);
    }
}
/* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
   the number of created vector stmts depends on the unrolling factor).
   However, the actual number of vector stmts for every SLP node depends on
   VF which is set later in vect_analyze_operations ().  Hence, SLP costs
   should be updated.  In this function we assume that the inside costs
   calculated in vect_model_xxx_cost are linear in ncopies.  */
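/* (Illustration with made-up numbers, not from the original sources: if the
   final vectorization factor is 8 and an instance was built with an SLP
   unrolling factor of 2, ncopies below is 8 / 2 = 4, and every body cost
   entry of the instance is recorded in the target cost data with its count
   multiplied by 4.)  */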
void
vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
{
  unsigned int i, j, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  stmt_vector_for_cost body_cost_vec;
  stmt_info_for_cost *si;
  void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_update_slp_costs_according_to_vf ===\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* We assume that costs are linear in ncopies.  */
      int ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Record the instance's instructions in the target cost model.
         This was delayed until here because the count of instructions
         isn't known beforehand.  */
      body_cost_vec = SLP_INSTANCE_BODY_COST_VEC (instance);

      FOR_EACH_VEC_ELT (body_cost_vec, j, si)
        (void) add_stmt_cost (data, si->count * ncopies, si->kind,
                              vinfo_for_stmt (si->stmt), si->misalign,
                              vect_body);
    }
}
/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
   scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

static void
vect_get_constant_vectors (tree op, slp_tree slp_node,
                           vec<tree> *vec_oprnds,
                           unsigned int op_num, unsigned int number_of_vectors,
                           int reduc_index)
{
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned nunits;
  tree vec_cst;
  tree *elts;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;
  enum tree_code code = gimple_expr_code (stmt);
  gimple def_stmt;
  struct loop *loop;
  gimple_seq ctor_seq = NULL;

  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && reduc_index != -1)
    {
      op_num = reduc_index;
      op = gimple_op (stmt, op_num + 1);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
         we need either neutral operands or the original operands.  See
         get_initial_def_for_reduction() for details.  */
      switch (code)
        {
          case WIDEN_SUM_EXPR:
          case DOT_PROD_EXPR:
          case SAD_EXPR:
          case PLUS_EXPR:
          case MINUS_EXPR:
          case BIT_IOR_EXPR:
          case BIT_XOR_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst0);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 0);

            break;

          case MULT_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst1);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 1);

            break;

          case BIT_AND_EXPR:
            neutral_op = build_int_cst (TREE_TYPE (op), -1);
            break;

          /* For MIN/MAX we don't have an easy neutral operand but
             the initial values can be used fine here.  Only for
             a reduction chain we have to force a neutral element.  */
          case MAX_EXPR:
          case MIN_EXPR:
            if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
              neutral_op = NULL;
            else
              {
                def_stmt = SSA_NAME_DEF_STMT (op);
                loop = (gimple_bb (stmt))->loop_father;
                neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                                    loop_preheader_edge (loop));
              }
            break;

          default:
            gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
            neutral_op = NULL;
        }
    }

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      is_store = true;
      op = gimple_assign_rhs1 (stmt);
    }
  else
    is_store = false;

  gcc_assert (op);

  if (CONSTANT_CLASS_P (op))
    constant_p = true;
  else
    constant_p = false;

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */
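  /* (Illustrative numbers, not from the original sources: with NUNITS == 4
     and GROUP_SIZE == 6, least_common_multiple (4, 6) == 12, so
     NUMBER_OF_COPIES == 12 / 6 == 2; the six scalars are used twice and
     fill 12 / 4 == 3 vectors.)  */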
  number_of_copies = least_common_multiple (nunits, group_size) / group_size;

  number_of_places_left_in_vector = nunits;
  elts = XALLOCAVEC (tree, nunits);
  bool place_after_defs = false;
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
        {
          if (is_store)
            op = gimple_assign_rhs1 (stmt);
          else
            {
              switch (code)
                {
                  case COND_EXPR:
                    if (op_num == 0 || op_num == 1)
                      {
                        tree cond = gimple_assign_rhs1 (stmt);
                        op = TREE_OPERAND (cond, op_num);
                      }
                    else
                      {
                        if (op_num == 2)
                          op = gimple_assign_rhs2 (stmt);
                        else
                          op = gimple_assign_rhs3 (stmt);
                      }
                    break;

                  case CALL_EXPR:
                    op = gimple_call_arg (stmt, op_num);
                    break;

                  case LSHIFT_EXPR:
                  case RSHIFT_EXPR:
                  case LROTATE_EXPR:
                  case RROTATE_EXPR:
                    op = gimple_op (stmt, op_num + 1);
                    /* Unlike the other binary operators, shifts/rotates have
                       the shift count being int, instead of the same type as
                       the lhs, so make sure the scalar is the right type if
                       we are dealing with vectors of
                       long long/long/short/char.  */
                    if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
                      op = fold_convert (TREE_TYPE (vector_type), op);
                    break;

                  default:
                    op = gimple_op (stmt, op_num + 1);
                    break;
                }
            }

          if (reduc_index != -1)
            {
              loop = (gimple_bb (stmt))->loop_father;
              def_stmt = SSA_NAME_DEF_STMT (op);

              gcc_assert (loop);

              /* Get the def before the loop.  In reduction chain we have only
                 one initial value.  */
              if ((j != (number_of_copies - 1)
                   || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
                       && i != 0))
                  && neutral_op)
                op = neutral_op;
              else
                op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                            loop_preheader_edge (loop));
            }

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          number_of_places_left_in_vector--;
          tree orig_op = op;
          if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
            {
              if (CONSTANT_CLASS_P (op))
                {
                  op = fold_unary (VIEW_CONVERT_EXPR,
                                   TREE_TYPE (vector_type), op);
                  gcc_assert (op && CONSTANT_CLASS_P (op));
                }
              else
                {
                  tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
                  gimple init_stmt;
                  op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
                  init_stmt
                    = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
                  gimple_seq_add_stmt (&ctor_seq, init_stmt);
                  op = new_temp;
                }
            }
          elts[number_of_places_left_in_vector] = op;
          if (!CONSTANT_CLASS_P (op))
            constant_p = false;
          if (TREE_CODE (orig_op) == SSA_NAME
              && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
              && STMT_VINFO_BB_VINFO (stmt_vinfo)
              && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
                  == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
            place_after_defs = true;

          if (number_of_places_left_in_vector == 0)
            {
              number_of_places_left_in_vector = nunits;

              if (constant_p)
                vec_cst = build_vector (vector_type, elts);
              else
                {
                  vec<constructor_elt, va_gc> *v;
                  unsigned k;
                  vec_alloc (v, nunits);
                  for (k = 0; k < nunits; ++k)
                    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
                  vec_cst = build_constructor (vector_type, v);
                }
              tree init;
              gimple_stmt_iterator gsi;
              if (place_after_defs)
                {
                  gsi = gsi_for_stmt
                    (vect_find_last_scalar_stmt_in_slp (slp_node));
                  init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
                }
              else
                init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
              if (ctor_seq != NULL)
                {
                  gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
                  gsi_insert_seq_before_without_update (&gsi, ctor_seq,
                                                        GSI_SAME_STMT);
                  ctor_seq = NULL;
                }
              voprnds.quick_push (init);
              place_after_defs = false;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
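  /* (Illustrative note, not from the original sources: if NUMBER_OF_VECTORS
     is 4 but only 2 vector defs were created above, the loop below pushes
     either the neutral vector when NEUTRAL_OP exists, e.g. {0,0,0,0} for a
     PLUS reduction, or copies of the defs already created, until 4 operands
     are available.)  */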
  while (number_of_vectors > vec_oprnds->length ())
    {
      tree neutral_vec = NULL;

      if (neutral_op)
        {
          if (!neutral_vec)
            neutral_vec = build_vector_from_val (vector_type, neutral_op);

          vec_oprnds->quick_push (neutral_vec);
        }
      else
        {
          for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
            vec_oprnds->quick_push (vop);
        }
    }
}
/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
{
  tree vec_oprnd;
  gimple vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());

  FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      vec_oprnds->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the corresponding child of SLP_NODE, and we call
   vect_get_slp_vect_defs () to retrieve them.  */

void
vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
                   vec<vec<tree> > *vec_oprnds, int reduc_index)
{
  gimple first_stmt;
  int number_of_vects = 0, i;
  unsigned int child_index = 0;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
  slp_tree child = NULL;
  vec<tree> vec_defs;
  tree oprnd;
  bool vectorized_defs;

  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
  FOR_EACH_VEC_ELT (ops, i, oprnd)
    {
      /* For each operand we check if it has vectorized definitions in a child
         node or we need to create them (for invariants and constants).  We
         check if the LHS of the first stmt of the next child matches OPRND.
         If it does, we found the correct child.  Otherwise, we call
         vect_get_constant_vectors (), and not advance CHILD_INDEX in order
         to check this child node for the next operand.  */
      vectorized_defs = false;
      if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
        {
          child = SLP_TREE_CHILDREN (slp_node)[child_index];

          /* We have to check both pattern and original def, if available.  */
          if (child)
            {
              gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
              gimple related
                = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));

              if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
                  || (related
                      && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
                {
                  /* The number of vector defs is determined by the number of
                     vector statements in the node from which we get those
                     statements.  */
                  number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
                  vectorized_defs = true;
                  child_index++;
                }
            }
          else
            child_index++;
        }

      if (!vectorized_defs)
        {
          if (i == 0)
            {
              number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
              /* Number of vector stmts was calculated according to LHS in
                 vect_schedule_slp_instance (), fix it by replacing LHS with
                 RHS, if necessary.  See vect_get_smallest_scalar_type () for
                 details.  */
              vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
                                             &rhs_size_unit);
              if (rhs_size_unit != lhs_size_unit)
                {
                  number_of_vects *= rhs_size_unit;
                  number_of_vects /= lhs_size_unit;
                }
            }
        }

      /* Allocate memory for vectorized defs.  */
      vec_defs = vNULL;
      vec_defs.create (number_of_vects);

      /* For reduction defs we call vect_get_constant_vectors (), since we are
         looking for initial loop invariant values.  */
      if (vectorized_defs && reduc_index == -1)
        /* The defs are already vectorized.  */
        vect_get_slp_vect_defs (child, &vec_defs);
      else
        /* Build vectors from scalar defs.  */
        vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
                                   number_of_vects, reduc_index);

      vec_oprnds->quick_push (vec_defs);

      /* For reductions, we only need initial values.  */
      if (reduc_index != -1)
        return;
    }
}
/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
   building a vector of type MASK_TYPE from it) and two input vectors placed in
   DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
   shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
   the created stmts must be inserted.  */
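/* (Illustrative numbers, not from the original sources: if NODE needs 8
   vector stmts and NCOPIES is 2, STRIDE is 8 / 2 = 4; copy 0 permutes
   DR_CHAIN[FIRST_VEC_INDX] and DR_CHAIN[SECOND_VEC_INDX], and copy 1 uses
   the entries 4 positions further down DR_CHAIN.)  */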
static void
vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
                           tree mask, int first_vec_indx, int second_vec_indx,
                           gimple_stmt_iterator *gsi, slp_tree node,
                           tree vectype, vec<tree> dr_chain,
                           int ncopies, int vect_stmts_counter)
{
  tree perm_dest;
  gimple perm_stmt = NULL;
  stmt_vec_info next_stmt_info;
  int i, stride;
  tree first_vec, second_vec, data_ref;

  stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;

  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
  for (i = SLP_TREE_VEC_STMTS (node).length ();
       i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
    SLP_TREE_VEC_STMTS (node).quick_push (NULL);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
  for (i = 0; i < ncopies; i++)
    {
      first_vec = dr_chain[first_vec_indx];
      second_vec = dr_chain[second_vec_indx];

      /* Generate the permute statement.  */
      perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
                                       first_vec, second_vec, mask);
      data_ref = make_ssa_name (perm_dest, perm_stmt);
      gimple_set_lhs (perm_stmt, data_ref);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);

      /* Store the vector statement in NODE.  */
      SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;

      first_vec_indx += stride;
      second_vec_indx += stride;
    }

  /* Mark the scalar stmt as vectorized.  */
  next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
  STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
}
/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
   return in CURRENT_MASK_ELEMENT its equivalent in target specific
   representation.  Check that the mask is valid and return FALSE if not.
   Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
   the next vector, i.e., the current first vector is not needed.  */

static bool
vect_get_mask_element (gimple stmt, int first_mask_element, int m,
                       int mask_nunits, bool only_one_vec, int index,
                       unsigned char *mask, int *current_mask_element,
                       bool *need_next_vector, int *number_of_mask_fixes,
                       bool *mask_fixed, bool *needs_first_vector)
{
  int i;

  /* Convert to target specific representation.  */
  *current_mask_element = first_mask_element + m;
  /* Adjust the value in case it's a mask for second and third vectors.  */
  *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);

  if (*current_mask_element < 0)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "permutation requires past vector ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return false;
    }

  if (*current_mask_element < mask_nunits)
    *needs_first_vector = true;

  /* We have only one input vector to permute but the mask accesses values in
     the next vector as well.  */
  if (only_one_vec && *current_mask_element >= mask_nunits)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "permutation requires at least two vectors ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  /* The mask requires the next vector.  */
  while (*current_mask_element >= mask_nunits * 2)
    {
      if (*needs_first_vector || *mask_fixed)
        {
          /* We either need the first vector too or have already moved to the
             next vector.  In both cases, this permutation needs three
             vectors.  */
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "permutation requires at "
                               "least three vectors ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }

          return false;
        }

      /* We move to the next vector, dropping the first one and working with
         the second and the third - we need to adjust the values of the mask
         accordingly.  */
      *current_mask_element -= mask_nunits * *number_of_mask_fixes;

      for (i = 0; i < index; i++)
        mask[i] -= mask_nunits * *number_of_mask_fixes;

      (*number_of_mask_fixes)++;
      *mask_fixed = true;
    }

  *need_next_vector = *mask_fixed;

  /* This was the last element of this mask.  Start a new one.  */
  if (index == mask_nunits - 1)
    {
      *number_of_mask_fixes = 1;
      *mask_fixed = false;
      *needs_first_vector = false;
    }

  return true;
}
/* Generate vector permute statements from a list of loads in DR_CHAIN.
   If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
   permute statements for the SLP node NODE of the SLP instance
   SLP_NODE_INSTANCE.  */

bool
vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
                              gimple_stmt_iterator *gsi, int vf,
                              slp_instance slp_node_instance, bool analyze_only)
{
  gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_element_type = NULL_TREE, mask_type;
  int i, j, k, nunits, vec_index = 0, scalar_index;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  gimple next_scalar_stmt;
  int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
  int first_mask_element;
  int index, unroll_factor, current_mask_element, ncopies;
  unsigned char *mask;
  bool only_one_vec = false, need_next_vector = false;
  int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
  int number_of_mask_fixes = 1;
  bool mask_fixed = false;
  bool needs_first_vector = false;
  machine_mode mode;

  mode = TYPE_MODE (vectype);

  if (!can_vec_perm_p (mode, false, NULL))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vect permute for ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }
      return false;
    }

  /* The generic VEC_PERM_EXPR code always uses an integral type of the
     same size as the vector element being permuted.  */
  mask_element_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  mask = XALLOCAVEC (unsigned char, nunits);
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
     unrolling factor.  */
  orig_vec_stmts_num = group_size
    * SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance) / nunits;
  if (orig_vec_stmts_num == 1)
    only_one_vec = true;

  /* Number of copies is determined by the final vectorization factor
     relatively to SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  /* Generate permutation masks for every NODE.  Number of masks for each NODE
     is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4.  I.e., we have a
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     ...

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
     The last mask is illegal since we assume two operands for permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */
= 0;
3300 first_vec_index
= vec_index
++;
3302 second_vec_index
= first_vec_index
;
3304 second_vec_index
= vec_index
++;
3306 for (j
= 0; j
< unroll_factor
; j
++)
3308 for (k
= 0; k
< group_size
; k
++)
3310 i
= SLP_TREE_LOAD_PERMUTATION (node
)[k
];
3311 first_mask_element
= i
+ j
* group_size
;
3312 if (!vect_get_mask_element (stmt
, first_mask_element
, 0,
3313 nunits
, only_one_vec
, index
,
3314 mask
, ¤t_mask_element
,
3316 &number_of_mask_fixes
, &mask_fixed
,
3317 &needs_first_vector
))
3319 gcc_assert (current_mask_element
>= 0
3320 && current_mask_element
< 2 * nunits
);
3321 mask
[index
++] = current_mask_element
;
3323 if (index
== nunits
)
3326 if (!can_vec_perm_p (mode
, false, mask
))
3328 if (dump_enabled_p ())
3330 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
3332 "unsupported vect permute { ");
3333 for (i
= 0; i
< nunits
; ++i
)
3334 dump_printf (MSG_MISSED_OPTIMIZATION
, "%d ",
3336 dump_printf (MSG_MISSED_OPTIMIZATION
, "}\n");
3344 tree mask_vec
, *mask_elts
;
3345 mask_elts
= XALLOCAVEC (tree
, nunits
);
3346 for (l
= 0; l
< nunits
; ++l
)
3347 mask_elts
[l
] = build_int_cst (mask_element_type
,
3349 mask_vec
= build_vector (mask_type
, mask_elts
);
3351 if (need_next_vector
)
3353 first_vec_index
= second_vec_index
;
3354 second_vec_index
= vec_index
;
3358 = SLP_TREE_SCALAR_STMTS (node
)[scalar_index
++];
3360 vect_create_mask_and_perm (stmt
, next_scalar_stmt
,
3361 mask_vec
, first_vec_index
, second_vec_index
,
3362 gsi
, node
, vectype
, dr_chain
,
3363 ncopies
, vect_stmts_counter
++);
/* Vectorize SLP instance tree in postorder.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
                            unsigned int vectorization_factor)
{
  gimple stmt;
  bool grouped_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i;
  slp_tree child;

  if (!node)
    return false;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_schedule_slp_instance (child, instance, vectorization_factor);

  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  Number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
     size.  */
  vec_stmts_size = (vectorization_factor * group_size) / nunits;
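  /* (Illustrative numbers, not from the original sources: for a group of
     GROUP_SIZE == 2 scalar stmts with VECTORIZATION_FACTOR == 4 and
     NUNITS == 4, the node needs (4 * 2) / 4 == 2 vector stmts.)  */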
  if (!SLP_TREE_VEC_STMTS (node).exists ())
    {
      SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "------>vectorizing SLP node starting from: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Vectorized stmts go before the last scalar stmt which is where
     all uses are ready.  */
  si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));

  /* Mark the first element of the reduction chain as reduction to properly
     transform the node.  In the analysis phase only the last element of the
     chain is marked as reduction.  */
  if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
    {
      STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
    }

  /* Handle two-operation SLP nodes by vectorizing the group with
     both operations and then performing a merge.  */
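  /* (Illustrative note, not from the original sources: for a group whose
     scalar stmts alternate PLUS_EXPR and MINUS_EXPR, CODE0 is PLUS_EXPR,
     OCODE is MINUS_EXPR and MASK becomes {0,1,0,1}; with NUNITS == 4 the
     merge mask built below is {0,5,2,7}, i.e. lanes 0 and 2 come from the
     vector of PLUS results and lanes 1 and 3 from the vector of MINUS
     results, which matches an addsub pattern.)  */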
  if (SLP_TREE_TWO_OPERATORS (node))
    {
      enum tree_code code0 = gimple_assign_rhs_code (stmt);
      enum tree_code ocode;
      gimple ostmt;
      unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
      bool allsame = true;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
        if (gimple_assign_rhs_code (ostmt) != code0)
          {
            mask[i] = 1;
            allsame = false;
            ocode = gimple_assign_rhs_code (ostmt);
          }
        else
          mask[i] = 0;
      if (!allsame)
        {
          vec<gimple> v0;
          vec<gimple> v1;
          unsigned j;
          tree tmask = NULL_TREE;
          vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
          v0 = SLP_TREE_VEC_STMTS (node).copy ();
          SLP_TREE_VEC_STMTS (node).truncate (0);
          gimple_assign_set_rhs_code (stmt, ocode);
          vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
          gimple_assign_set_rhs_code (stmt, code0);
          v1 = SLP_TREE_VEC_STMTS (node).copy ();
          SLP_TREE_VEC_STMTS (node).truncate (0);
          tree meltype = build_nonstandard_integer_type
            (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
          tree mvectype = get_same_sized_vectype (meltype, vectype);
          unsigned k = 0, l;
          for (j = 0; j < v0.length (); ++j)
            {
              tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
              for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
                {
                  if (k >= group_size)
                    k = 0;
                  melts[l] = build_int_cst
                    (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
                }
              tmask = build_vector (mvectype, melts);

              /* ??? Not all targets support a VEC_PERM_EXPR with a
                 constant mask that would translate to a vec_merge RTX
                 (with their vec_perm_const_ok).  We can either not
                 vectorize in that case or let veclower do its job.
                 Unfortunately that isn't too great and at least for
                 plus/minus we'd eventually like to match targets
                 vector addsub instructions.  */
              gimple vstmt;
              vstmt = gimple_build_assign (make_ssa_name (vectype),
                                           VEC_PERM_EXPR,
                                           gimple_assign_lhs (v0[j]),
                                           gimple_assign_lhs (v1[j]), tmask);
              vect_finish_stmt_generation (stmt, vstmt, &si);
              SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
            }
          return false;
        }
    }

  is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
  return is_store;
}
/* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
   For loop vectorization this is done in vectorizable_call, but for SLP
   it needs to be deferred until end of vect_schedule_slp, because multiple
   SLP instances may refer to the same scalar stmt.  */

static void
vect_remove_slp_scalar_calls (slp_tree node)
{
  gimple stmt, new_stmt;
  gimple_stmt_iterator gsi;
  int i;
  slp_tree child;
  tree lhs;
  stmt_vec_info stmt_info;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_remove_slp_scalar_calls (child);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
        continue;
      stmt_info = vinfo_for_stmt (stmt);
      if (stmt_info == NULL
          || is_pattern_stmt_p (stmt_info)
          || !PURE_SLP_STMT (stmt_info))
        continue;
      lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi = gsi_for_stmt (stmt);
      gsi_replace (&gsi, new_stmt, false);
      SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
    }
}
/* Generate vector code for all SLP instances in the loop/basic block.  */

bool
vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  unsigned int i, vf;
  bool is_store = false;

  if (loop_vinfo)
    {
      slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    {
      slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
      vf = 1;
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
                                             instance, vf);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vectorizing stmts using SLP.\n");
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple store;
      unsigned int j;
      gimple_stmt_iterator gsi;

      /* Remove scalar call stmts.  Do not do this for basic-block
         vectorization as not all uses may be vectorized.
         ???  Why should this be necessary?  DCE should be able to
         remove the stmts itself.
         ???  For BB vectorization we can as well remove scalar
         stmts starting from the SLP tree root if they have no
         uses.  */
      if (loop_vinfo)
        vect_remove_slp_scalar_calls (root);

      for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
                  && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
        {
          if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
            break;

          if (is_pattern_stmt_p (vinfo_for_stmt (store)))
            store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
          /* Free the attached stmt_vec_info and remove the stmt.  */
          gsi = gsi_for_stmt (store);
          unlink_stmt_vdef (store);
          gsi_remove (&gsi, true);
          release_defs (store);
          free_stmt_vec_info (store);
        }
    }

  return is_store;
}
/* Vectorize the basic block.  */

void
vect_slp_transform_bb (basic_block bb)
{
  bb_vec_info bb_vinfo = vec_info_for_bb (bb);
  gimple_stmt_iterator si;

  gcc_assert (bb_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info;

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "------>SLPing statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");
        }

      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);

      /* Schedule all the SLP instances when the first SLP stmt is reached.  */
      if (STMT_SLP_TYPE (stmt_info))
        {
          vect_schedule_slp (NULL, bb_vinfo);
          break;
        }
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "BASIC BLOCK VECTORIZED\n");

  destroy_bb_vec_info (bb_vinfo);
}