/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
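
/* Illustrative note (not part of the original sources): basic-block SLP
   looks for groups of isomorphic scalar statements in a single basic
   block -- typically a group of adjacent stores -- and replaces each group
   with vector operations.  Assuming a target with 4-lane integer vectors,
   the straight-line code

     void
     foo (int *restrict a, int *restrict b, int *restrict c)
     {
       a[0] = b[0] + c[0];
       a[1] = b[1] + c[1];
       a[2] = b[2] + c[2];
       a[3] = b[3] + c[3];
     }

   can be rewritten as one vector load from B, one from C, one vector add
   and one vector store to A.  The routines in this file build an SLP tree
   describing such a group, starting from the grouped stores and following
   the operands down the use-def chains.  */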
#include "coretypes.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "gimple-pretty-print.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location if found, UNKNOWN_LOCATION otherwise.  */

source_location
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple> scalar_stmts)
{
  slp_tree node;
  gimple stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple stmt, gimple first_stmt)
{
  gimple next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
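
/* Worked example (illustrative, not from the original sources; it assumes
   GROUP_GAP records the distance from the previous member of the chain):
   for a load group whose members access

     a[0]   <-- FIRST_STMT, place 0
     a[1]   <-- GROUP_GAP 1, place 0 + 1 = 1
     a[3]   <-- GROUP_GAP 2 (a[2] is skipped), place 1 + 2 = 3

   the routine above accumulates GROUP_GAP along the chain, so the place of
   each statement is its element offset from the first access, gaps
   included.  A statement that is not a member of the chain yields -1.  */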
225 /* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
226 they are of a valid type and that they match the defs of the first stmt of
227 the SLP group (stored in OPRNDS_INFO). If there was a fatal error
228 return -1, if the error could be corrected by swapping operands of the
229 operation return 1, if everything is ok return 0. */
232 vect_get_and_check_slp_defs (loop_vec_info loop_vinfo
, bb_vec_info bb_vinfo
,
233 gimple stmt
, unsigned stmt_num
,
234 vec
<slp_oprnd_info
> *oprnds_info
)
237 unsigned int i
, number_of_oprnds
;
240 enum vect_def_type dt
= vect_uninitialized_def
;
241 struct loop
*loop
= NULL
;
242 bool pattern
= false;
243 slp_oprnd_info oprnd_info
;
244 int first_op_idx
= 1;
245 bool commutative
= false;
246 bool first_op_cond
= false;
247 bool first
= stmt_num
== 0;
248 bool second
= stmt_num
== 1;
251 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
253 if (is_gimple_call (stmt
))
255 number_of_oprnds
= gimple_call_num_args (stmt
);
258 else if (is_gimple_assign (stmt
))
260 enum tree_code code
= gimple_assign_rhs_code (stmt
);
261 number_of_oprnds
= gimple_num_ops (stmt
) - 1;
262 if (gimple_assign_rhs_code (stmt
) == COND_EXPR
)
264 first_op_cond
= true;
269 commutative
= commutative_tree_code (code
);
274 bool swapped
= false;
275 for (i
= 0; i
< number_of_oprnds
; i
++)
280 if (i
== 0 || i
== 1)
281 oprnd
= TREE_OPERAND (gimple_op (stmt
, first_op_idx
),
284 oprnd
= gimple_op (stmt
, first_op_idx
+ i
- 1);
287 oprnd
= gimple_op (stmt
, first_op_idx
+ (swapped
? !i
: i
));
289 oprnd_info
= (*oprnds_info
)[i
];
291 if (!vect_is_simple_use (oprnd
, NULL
, loop_vinfo
, bb_vinfo
, &def_stmt
,
294 if (dump_enabled_p ())
296 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
297 "Build SLP failed: can't analyze def for ");
298 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, oprnd
);
299 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
305 /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
306 from the pattern. Check that all the stmts of the node are in the
308 if (def_stmt
&& gimple_bb (def_stmt
)
309 && ((loop
&& flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
310 || (!loop
&& gimple_bb (def_stmt
) == BB_VINFO_BB (bb_vinfo
)
311 && gimple_code (def_stmt
) != GIMPLE_PHI
))
312 && vinfo_for_stmt (def_stmt
)
313 && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt
))
314 && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt
))
315 && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt
)))
318 if (!first
&& !oprnd_info
->first_pattern
319 /* Allow different pattern state for the defs of the
320 first stmt in reduction chains. */
321 && (oprnd_info
->first_dt
!= vect_reduction_def
322 || (!second
&& !oprnd_info
->second_pattern
)))
332 if (dump_enabled_p ())
334 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
335 "Build SLP failed: some of the stmts"
336 " are in a pattern, and others are not ");
337 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, oprnd
);
338 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
344 def_stmt
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt
));
345 dt
= STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt
));
347 if (dt
== vect_unknown_def_type
)
349 if (dump_enabled_p ())
350 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
351 "Unsupported pattern.\n");
355 switch (gimple_code (def_stmt
))
358 def
= gimple_phi_result (def_stmt
);
362 def
= gimple_assign_lhs (def_stmt
);
366 if (dump_enabled_p ())
367 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
368 "unsupported defining stmt:\n");
374 oprnd_info
->second_pattern
= pattern
;
378 oprnd_info
->first_dt
= dt
;
379 oprnd_info
->first_pattern
= pattern
;
380 oprnd_info
->first_op_type
= TREE_TYPE (oprnd
);
384 /* Not first stmt of the group, check that the def-stmt/s match
385 the def-stmt/s of the first stmt. Allow different definition
386 types for reduction chains: the first stmt must be a
387 vect_reduction_def (a phi node), and the rest
388 vect_internal_def. */
389 if (((oprnd_info
->first_dt
!= dt
390 && !(oprnd_info
->first_dt
== vect_reduction_def
391 && dt
== vect_internal_def
)
392 && !((oprnd_info
->first_dt
== vect_external_def
393 || oprnd_info
->first_dt
== vect_constant_def
)
394 && (dt
== vect_external_def
395 || dt
== vect_constant_def
)))
396 || !types_compatible_p (oprnd_info
->first_op_type
,
399 /* Try swapping operands if we got a mismatch. */
408 if (dump_enabled_p ())
409 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
410 "Build SLP failed: different types\n");
416 /* Check the types of the definitions. */
419 case vect_constant_def
:
420 case vect_external_def
:
421 case vect_reduction_def
:
424 case vect_internal_def
:
425 oprnd_info
->def_stmts
.quick_push (def_stmt
);
429 /* FORNOW: Not supported. */
430 if (dump_enabled_p ())
432 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
433 "Build SLP failed: illegal type of def ");
434 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, def
);
435 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
447 tree cond
= gimple_assign_rhs1 (stmt
);
448 swap_ssa_operands (stmt
, &TREE_OPERAND (cond
, 0),
449 &TREE_OPERAND (cond
, 1));
450 TREE_SET_CODE (cond
, swap_tree_comparison (TREE_CODE (cond
)));
453 swap_ssa_operands (stmt
, gimple_assign_rhs1_ptr (stmt
),
454 gimple_assign_rhs2_ptr (stmt
));
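
/* Illustrative sketch (not from the original sources) of the swapping case
   handled above: vect_get_and_check_slp_defs returns 1 when the defs of a
   stmt do not match those of the first stmt of the group but the operation
   is commutative, e.g.

     x0 = a[0] + b[0];   <-- first stmt: rhs1 from A, rhs2 from B
     x1 = b[1] + a[1];   <-- mismatch: rhs1 from B, rhs2 from A

   Swapping rhs1 and rhs2 of the second statement (swap_ssa_operands, or
   swap_tree_comparison on the condition of a COND_EXPR, as done above)
   makes both lanes draw operand 0 from A and operand 1 from B, so the SLP
   build can proceed with matching operands.  */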
461 /* Verify if the scalar stmts STMTS are isomorphic, require data
462 permutation or are of unsupported types of operation. Return
463 true if they are, otherwise return false and indicate in *MATCHES
464 which stmts are not isomorphic to the first one. If MATCHES[0]
465 is false then this indicates the comparison could not be
466 carried out or the stmts will never be vectorized by SLP. */
469 vect_build_slp_tree_1 (loop_vec_info loop_vinfo
, bb_vec_info bb_vinfo
,
470 vec
<gimple
> stmts
, unsigned int group_size
,
471 unsigned nops
, unsigned int *max_nunits
,
472 unsigned int vectorization_factor
, bool *matches
,
476 gimple first_stmt
= stmts
[0], stmt
= stmts
[0];
477 enum tree_code first_stmt_code
= ERROR_MARK
;
478 enum tree_code alt_stmt_code
= ERROR_MARK
;
479 enum tree_code rhs_code
= ERROR_MARK
;
480 enum tree_code first_cond_code
= ERROR_MARK
;
482 bool need_same_oprnds
= false;
483 tree vectype
= NULL_TREE
, scalar_type
, first_op1
= NULL_TREE
;
486 machine_mode optab_op2_mode
;
487 machine_mode vec_mode
;
489 gimple first_load
= NULL
, prev_first_load
= NULL
;
492 /* For every stmt in NODE find its def stmt/s. */
493 FOR_EACH_VEC_ELT (stmts
, i
, stmt
)
497 if (dump_enabled_p ())
499 dump_printf_loc (MSG_NOTE
, vect_location
, "Build SLP for ");
500 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
501 dump_printf (MSG_NOTE
, "\n");
504 /* Fail to vectorize statements marked as unvectorizable. */
505 if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt
)))
507 if (dump_enabled_p ())
509 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
510 "Build SLP failed: unvectorizable statement ");
511 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
512 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
514 /* Fatal mismatch. */
519 lhs
= gimple_get_lhs (stmt
);
520 if (lhs
== NULL_TREE
)
522 if (dump_enabled_p ())
524 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
525 "Build SLP failed: not GIMPLE_ASSIGN nor "
527 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
528 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
530 /* Fatal mismatch. */
535 if (is_gimple_assign (stmt
)
536 && gimple_assign_rhs_code (stmt
) == COND_EXPR
537 && (cond
= gimple_assign_rhs1 (stmt
))
538 && !COMPARISON_CLASS_P (cond
))
540 if (dump_enabled_p ())
542 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
543 "Build SLP failed: condition is not "
545 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
546 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
548 /* Fatal mismatch. */
553 scalar_type
= vect_get_smallest_scalar_type (stmt
, &dummy
, &dummy
);
554 vectype
= get_vectype_for_scalar_type (scalar_type
);
557 if (dump_enabled_p ())
559 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
560 "Build SLP failed: unsupported data-type ");
561 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
563 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
565 /* Fatal mismatch. */
570 /* If populating the vector type requires unrolling then fail
571 before adjusting *max_nunits for basic-block vectorization. */
573 && TYPE_VECTOR_SUBPARTS (vectype
) > group_size
)
575 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
576 "Build SLP failed: unrolling required "
577 "in basic block SLP\n");
578 /* Fatal mismatch. */
583 /* In case of multiple types we need to detect the smallest type. */
584 if (*max_nunits
< TYPE_VECTOR_SUBPARTS (vectype
))
586 *max_nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
588 vectorization_factor
= *max_nunits
;
591 if (gcall
*call_stmt
= dyn_cast
<gcall
*> (stmt
))
593 rhs_code
= CALL_EXPR
;
594 if (gimple_call_internal_p (call_stmt
)
595 || gimple_call_tail_p (call_stmt
)
596 || gimple_call_noreturn_p (call_stmt
)
597 || !gimple_call_nothrow_p (call_stmt
)
598 || gimple_call_chain (call_stmt
))
600 if (dump_enabled_p ())
602 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
603 "Build SLP failed: unsupported call type ");
604 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
606 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
608 /* Fatal mismatch. */
614 rhs_code
= gimple_assign_rhs_code (stmt
);
616 /* Check the operation. */
619 first_stmt_code
= rhs_code
;
621 /* Shift arguments should be equal in all the packed stmts for a
622 vector shift with scalar shift operand. */
623 if (rhs_code
== LSHIFT_EXPR
|| rhs_code
== RSHIFT_EXPR
624 || rhs_code
== LROTATE_EXPR
625 || rhs_code
== RROTATE_EXPR
)
627 vec_mode
= TYPE_MODE (vectype
);
629 /* First see if we have a vector/vector shift. */
630 optab
= optab_for_tree_code (rhs_code
, vectype
,
634 || optab_handler (optab
, vec_mode
) == CODE_FOR_nothing
)
636 /* No vector/vector shift, try for a vector/scalar shift. */
637 optab
= optab_for_tree_code (rhs_code
, vectype
,
642 if (dump_enabled_p ())
643 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
644 "Build SLP failed: no optab.\n");
645 /* Fatal mismatch. */
649 icode
= (int) optab_handler (optab
, vec_mode
);
650 if (icode
== CODE_FOR_nothing
)
652 if (dump_enabled_p ())
653 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
655 "op not supported by target.\n");
656 /* Fatal mismatch. */
660 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
661 if (!VECTOR_MODE_P (optab_op2_mode
))
663 need_same_oprnds
= true;
664 first_op1
= gimple_assign_rhs2 (stmt
);
668 else if (rhs_code
== WIDEN_LSHIFT_EXPR
)
670 need_same_oprnds
= true;
671 first_op1
= gimple_assign_rhs2 (stmt
);
676 if (first_stmt_code
!= rhs_code
677 && alt_stmt_code
== ERROR_MARK
)
678 alt_stmt_code
= rhs_code
;
679 if (first_stmt_code
!= rhs_code
680 && (first_stmt_code
!= IMAGPART_EXPR
681 || rhs_code
!= REALPART_EXPR
)
682 && (first_stmt_code
!= REALPART_EXPR
683 || rhs_code
!= IMAGPART_EXPR
)
684 /* Handle mismatches in plus/minus by computing both
685 and merging the results. */
686 && !((first_stmt_code
== PLUS_EXPR
687 || first_stmt_code
== MINUS_EXPR
)
688 && (alt_stmt_code
== PLUS_EXPR
689 || alt_stmt_code
== MINUS_EXPR
)
690 && rhs_code
== alt_stmt_code
)
691 && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt
))
692 && (first_stmt_code
== ARRAY_REF
693 || first_stmt_code
== BIT_FIELD_REF
694 || first_stmt_code
== INDIRECT_REF
695 || first_stmt_code
== COMPONENT_REF
696 || first_stmt_code
== MEM_REF
)))
698 if (dump_enabled_p ())
700 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
701 "Build SLP failed: different operation "
703 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
704 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
706 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
714 && !operand_equal_p (first_op1
, gimple_assign_rhs2 (stmt
), 0))
716 if (dump_enabled_p ())
718 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
719 "Build SLP failed: different shift "
721 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
722 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
728 if (rhs_code
== CALL_EXPR
)
730 gimple first_stmt
= stmts
[0];
731 if (gimple_call_num_args (stmt
) != nops
732 || !operand_equal_p (gimple_call_fn (first_stmt
),
733 gimple_call_fn (stmt
), 0)
734 || gimple_call_fntype (first_stmt
)
735 != gimple_call_fntype (stmt
))
737 if (dump_enabled_p ())
739 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
740 "Build SLP failed: different calls in ");
741 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
743 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
751 /* Grouped store or load. */
752 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt
)))
754 if (REFERENCE_CLASS_P (lhs
))
762 /* Check that the size of interleaved loads group is not
763 greater than the SLP group size. */
765 = vectorization_factor
/ TYPE_VECTOR_SUBPARTS (vectype
);
767 && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)) == stmt
768 && ((GROUP_SIZE (vinfo_for_stmt (stmt
))
769 - GROUP_GAP (vinfo_for_stmt (stmt
)))
770 > ncopies
* group_size
))
772 if (dump_enabled_p ())
774 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
775 "Build SLP failed: the number "
776 "of interleaved loads is greater than "
777 "the SLP group size ");
778 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
780 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
782 /* Fatal mismatch. */
787 first_load
= GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
));
790 /* Check that there are no loads from different interleaving
791 chains in the same node. */
792 if (prev_first_load
!= first_load
)
794 if (dump_enabled_p ())
796 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
798 "Build SLP failed: different "
799 "interleaving chains in one node ");
800 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
802 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
809 prev_first_load
= first_load
;
811 } /* Grouped access. */
814 if (TREE_CODE_CLASS (rhs_code
) == tcc_reference
)
816 /* Not grouped load. */
817 if (dump_enabled_p ())
819 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
820 "Build SLP failed: not grouped load ");
821 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
822 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
825 /* FORNOW: Not grouped loads are not supported. */
826 /* Fatal mismatch. */
831 /* Not memory operation. */
832 if (TREE_CODE_CLASS (rhs_code
) != tcc_binary
833 && TREE_CODE_CLASS (rhs_code
) != tcc_unary
834 && TREE_CODE_CLASS (rhs_code
) != tcc_expression
835 && rhs_code
!= CALL_EXPR
)
837 if (dump_enabled_p ())
839 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
840 "Build SLP failed: operation");
841 dump_printf (MSG_MISSED_OPTIMIZATION
, " unsupported ");
842 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
843 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
845 /* Fatal mismatch. */
850 if (rhs_code
== COND_EXPR
)
852 tree cond_expr
= gimple_assign_rhs1 (stmt
);
855 first_cond_code
= TREE_CODE (cond_expr
);
856 else if (first_cond_code
!= TREE_CODE (cond_expr
))
858 if (dump_enabled_p ())
860 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
861 "Build SLP failed: different"
863 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
865 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
876 for (i
= 0; i
< group_size
; ++i
)
880 /* If we allowed a two-operation SLP node verify the target can cope
881 with the permute we are going to use. */
882 if (alt_stmt_code
!= ERROR_MARK
883 && TREE_CODE_CLASS (alt_stmt_code
) != tcc_reference
)
886 = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype
));
887 for (i
= 0; i
< TYPE_VECTOR_SUBPARTS (vectype
); ++i
)
890 if (gimple_assign_rhs_code (stmts
[i
% group_size
]) == alt_stmt_code
)
891 sel
[i
] += TYPE_VECTOR_SUBPARTS (vectype
);
893 if (!can_vec_perm_p (TYPE_MODE (vectype
), false, sel
))
895 for (i
= 0; i
< group_size
; ++i
)
896 if (gimple_assign_rhs_code (stmts
[i
]) == alt_stmt_code
)
899 if (dump_enabled_p ())
901 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
902 "Build SLP failed: different operation "
904 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
906 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
908 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
914 *two_operators
= true;
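
/* Illustrative sketch (not from the original sources) of the two-operator
   case accepted above: a group mixing PLUS_EXPR and MINUS_EXPR such as

     a[0] = b[0] + c[0];
     a[1] = b[1] - c[1];
     a[2] = b[2] + c[2];
     a[3] = b[3] - c[3];

   is still built as one SLP node with SLP_TREE_TWO_OPERATORS set.  Both a
   vector add and a vector subtract are emitted, and the per-lane selection
   vector checked with can_vec_perm_p above blends the two results, taking
   the PLUS lanes from one vector and the MINUS lanes from the other.  */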
920 /* Recursively build an SLP tree starting from NODE.
921 Fail (and return a value not equal to zero) if def-stmts are not
922 isomorphic, require data permutation or are of unsupported types of
923 operation. Otherwise, return 0.
924 The value returned is the depth in the SLP tree where a mismatch
928 vect_build_slp_tree (loop_vec_info loop_vinfo
, bb_vec_info bb_vinfo
,
929 slp_tree
*node
, unsigned int group_size
,
930 unsigned int *max_nunits
,
931 vec
<slp_tree
> *loads
,
932 unsigned int vectorization_factor
,
933 bool *matches
, unsigned *npermutes
, unsigned *tree_size
,
934 unsigned max_tree_size
)
936 unsigned nops
, i
, this_tree_size
= 0;
941 stmt
= SLP_TREE_SCALAR_STMTS (*node
)[0];
942 if (is_gimple_call (stmt
))
943 nops
= gimple_call_num_args (stmt
);
944 else if (is_gimple_assign (stmt
))
946 nops
= gimple_num_ops (stmt
) - 1;
947 if (gimple_assign_rhs_code (stmt
) == COND_EXPR
)
953 bool two_operators
= false;
954 if (!vect_build_slp_tree_1 (loop_vinfo
, bb_vinfo
,
955 SLP_TREE_SCALAR_STMTS (*node
), group_size
, nops
,
956 max_nunits
, vectorization_factor
, matches
,
959 SLP_TREE_TWO_OPERATORS (*node
) = two_operators
;
961 /* If the SLP node is a load, terminate the recursion. */
962 if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt
))
963 && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
))))
965 loads
->safe_push (*node
);
969 /* Get at the operands, verifying they are compatible. */
970 vec
<slp_oprnd_info
> oprnds_info
= vect_create_oprnd_info (nops
, group_size
);
971 slp_oprnd_info oprnd_info
;
972 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node
), i
, stmt
)
974 switch (vect_get_and_check_slp_defs (loop_vinfo
, bb_vinfo
,
975 stmt
, i
, &oprnds_info
))
981 vect_free_oprnd_info (oprnds_info
);
988 for (i
= 0; i
< group_size
; ++i
)
991 vect_free_oprnd_info (oprnds_info
);
995 stmt
= SLP_TREE_SCALAR_STMTS (*node
)[0];
997 /* Create SLP_TREE nodes for the definition node/s. */
998 FOR_EACH_VEC_ELT (oprnds_info
, i
, oprnd_info
)
1001 unsigned old_nloads
= loads
->length ();
1002 unsigned old_max_nunits
= *max_nunits
;
1004 if (oprnd_info
->first_dt
!= vect_internal_def
)
1007 if (++this_tree_size
> max_tree_size
)
1009 vect_free_oprnd_info (oprnds_info
);
1013 child
= vect_create_new_slp_node (oprnd_info
->def_stmts
);
1016 vect_free_oprnd_info (oprnds_info
);
1020 if (vect_build_slp_tree (loop_vinfo
, bb_vinfo
, &child
,
1021 group_size
, max_nunits
, loads
,
1022 vectorization_factor
, matches
,
1023 npermutes
, &this_tree_size
, max_tree_size
))
1025 /* If we have all children of child built up from scalars then just
1026 throw that away and build it up this node from scalars. */
1027 if (!SLP_TREE_CHILDREN (child
).is_empty ())
1030 slp_tree grandchild
;
1032 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child
), j
, grandchild
)
1033 if (grandchild
!= NULL
)
1038 *max_nunits
= old_max_nunits
;
1039 loads
->truncate (old_nloads
);
1040 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child
), j
, grandchild
)
1041 vect_free_slp_tree (grandchild
);
1042 SLP_TREE_CHILDREN (child
).truncate (0);
1044 dump_printf_loc (MSG_NOTE
, vect_location
,
1045 "Building parent vector operands from "
1046 "scalars instead\n");
1047 oprnd_info
->def_stmts
= vNULL
;
1048 vect_free_slp_tree (child
);
1049 SLP_TREE_CHILDREN (*node
).quick_push (NULL
);
1054 oprnd_info
->def_stmts
= vNULL
;
1055 SLP_TREE_CHILDREN (*node
).quick_push (child
);
1059 /* If the SLP build failed fatally and we analyze a basic-block
1060 simply treat nodes we fail to build as externally defined
1061 (and thus build vectors from the scalar defs).
1062 The cost model will reject outright expensive cases.
1063 ??? This doesn't treat cases where permutation ultimatively
1064 fails (or we don't try permutation below). Ideally we'd
1065 even compute a permutation that will end up with the maximum
1069 /* ??? Rejecting patterns this way doesn't work. We'd have to
1070 do extra work to cancel the pattern so the uses see the
1072 && !is_pattern_stmt_p (vinfo_for_stmt (stmt
)))
1075 slp_tree grandchild
;
1078 *max_nunits
= old_max_nunits
;
1079 loads
->truncate (old_nloads
);
1080 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child
), j
, grandchild
)
1081 vect_free_slp_tree (grandchild
);
1082 SLP_TREE_CHILDREN (child
).truncate (0);
1084 dump_printf_loc (MSG_NOTE
, vect_location
,
1085 "Building vector operands from scalars\n");
1086 oprnd_info
->def_stmts
= vNULL
;
1087 vect_free_slp_tree (child
);
1088 SLP_TREE_CHILDREN (*node
).quick_push (NULL
);
1092 /* If the SLP build for operand zero failed and operand zero
1093 and one can be commutated try that for the scalar stmts
1094 that failed the match. */
1096 /* A first scalar stmt mismatch signals a fatal mismatch. */
1098 /* ??? For COND_EXPRs we can swap the comparison operands
1099 as well as the arms under some constraints. */
1101 && oprnds_info
[1]->first_dt
== vect_internal_def
1102 && is_gimple_assign (stmt
)
1103 && commutative_tree_code (gimple_assign_rhs_code (stmt
))
1104 && !SLP_TREE_TWO_OPERATORS (*node
)
1105 /* Do so only if the number of not successful permutes was nor more
1106 than a cut-ff as re-trying the recursive match on
1107 possibly each level of the tree would expose exponential
1112 slp_tree grandchild
;
1115 *max_nunits
= old_max_nunits
;
1116 loads
->truncate (old_nloads
);
1117 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child
), j
, grandchild
)
1118 vect_free_slp_tree (grandchild
);
1119 SLP_TREE_CHILDREN (child
).truncate (0);
1121 /* Swap mismatched definition stmts. */
1122 dump_printf_loc (MSG_NOTE
, vect_location
,
1123 "Re-trying with swapped operands of stmts ");
1124 for (j
= 0; j
< group_size
; ++j
)
1127 std::swap (oprnds_info
[0]->def_stmts
[j
],
1128 oprnds_info
[1]->def_stmts
[j
]);
1129 dump_printf (MSG_NOTE
, "%d ", j
);
1131 dump_printf (MSG_NOTE
, "\n");
1132 /* And try again with scratch 'matches' ... */
1133 bool *tem
= XALLOCAVEC (bool, group_size
);
1134 if (vect_build_slp_tree (loop_vinfo
, bb_vinfo
, &child
,
1135 group_size
, max_nunits
, loads
,
1136 vectorization_factor
,
1137 tem
, npermutes
, &this_tree_size
,
1140 /* ... so if successful we can apply the operand swapping
1141 to the GIMPLE IL. This is necessary because for example
1142 vect_get_slp_defs uses operand indexes and thus expects
1143 canonical operand order. */
1144 for (j
= 0; j
< group_size
; ++j
)
1147 gimple stmt
= SLP_TREE_SCALAR_STMTS (*node
)[j
];
1148 swap_ssa_operands (stmt
, gimple_assign_rhs1_ptr (stmt
),
1149 gimple_assign_rhs2_ptr (stmt
));
1151 oprnd_info
->def_stmts
= vNULL
;
1152 SLP_TREE_CHILDREN (*node
).quick_push (child
);
1159 oprnd_info
->def_stmts
= vNULL
;
1160 vect_free_slp_tree (child
);
1161 vect_free_oprnd_info (oprnds_info
);
1166 *tree_size
+= this_tree_size
;
1168 vect_free_oprnd_info (oprnds_info
);
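
/* Illustrative sketch (not from the original sources) of the tree built by
   vect_build_slp_tree for the running store-group example: the root node
   holds the four stores, its child holds the four additions, and the
   grandchildren hold the two load groups, at which point the recursion
   terminates:

     { a[0] = t0, ..., a[3] = t3 }                        <-- root (stores)
         { t0 = s0 + u0, ..., t3 = s3 + u3 }              <-- child (adds)
     { s0 = b[0], ..., s3 = b[3] }  { u0 = c[0], ... }    <-- leaves (loads)

   Operands that are constants or defined outside the SLP region become
   NULL children (vectors built from scalar defs) rather than further
   nodes.  */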
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

static void
vect_print_slp_tree (int dump_kind, slp_tree node)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
    }
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
}
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
}
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;
  slp_tree child;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
}
/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)
{
  gimple stmt;
  vec<gimple> tmp_stmts;
  unsigned int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}
1273 /* Attempt to reorder stmts in a reduction chain so that we don't
1274 require any load permutation. Return true if that was possible,
1275 otherwise return false. */
1278 vect_attempt_slp_rearrange_stmts (slp_instance slp_instn
)
1280 unsigned int group_size
= SLP_INSTANCE_GROUP_SIZE (slp_instn
);
1284 slp_tree node
, load
;
1286 /* Compare all the permutation sequences to the first one. We know
1287 that at least one load is permuted. */
1288 node
= SLP_INSTANCE_LOADS (slp_instn
)[0];
1289 if (!node
->load_permutation
.exists ())
1291 for (i
= 1; SLP_INSTANCE_LOADS (slp_instn
).iterate (i
, &load
); ++i
)
1293 if (!load
->load_permutation
.exists ())
1295 FOR_EACH_VEC_ELT (load
->load_permutation
, j
, lidx
)
1296 if (lidx
!= node
->load_permutation
[j
])
1300 /* Check that the loads in the first sequence are different and there
1301 are no gaps between them. */
1302 load_index
= sbitmap_alloc (group_size
);
1303 bitmap_clear (load_index
);
1304 FOR_EACH_VEC_ELT (node
->load_permutation
, i
, lidx
)
1306 if (bitmap_bit_p (load_index
, lidx
))
1308 sbitmap_free (load_index
);
1311 bitmap_set_bit (load_index
, lidx
);
1313 for (i
= 0; i
< group_size
; i
++)
1314 if (!bitmap_bit_p (load_index
, i
))
1316 sbitmap_free (load_index
);
1319 sbitmap_free (load_index
);
1321 /* This permutation is valid for reduction. Since the order of the
1322 statements in the nodes is not important unless they are memory
1323 accesses, we can rearrange the statements in all the nodes
1324 according to the order of the loads. */
1325 vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn
), group_size
,
1326 node
->load_permutation
);
1328 /* We are done, no actual permutations need to be generated. */
1329 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1330 SLP_TREE_LOAD_PERMUTATION (node
).release ();
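
/* Worked example (illustrative, not from the original sources) for the
   rearrangement above: in a reduction the order of the scalar statements
   is irrelevant, so if the load node of an SLP reduction carries load
   permutation { 1, 0 }, e.g.

     sum0 += a[1];
     sum1 += a[0];

   the reduction statements themselves can be reordered to { sum1, sum0 }
   instead of permuting the loaded vector, after which the load permutation
   is dropped.  vect_slp_rearrange_stmts applies that reordering to every
   node of the instance.  */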
1334 /* Check if the required load permutations in the SLP instance
1335 SLP_INSTN are supported. */
1338 vect_supported_load_permutation_p (slp_instance slp_instn
)
1340 unsigned int group_size
= SLP_INSTANCE_GROUP_SIZE (slp_instn
);
1341 unsigned int i
, j
, k
, next
;
1343 gimple stmt
, load
, next_load
, first_load
;
1344 struct data_reference
*dr
;
1346 if (dump_enabled_p ())
1348 dump_printf_loc (MSG_NOTE
, vect_location
, "Load permutation ");
1349 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1350 if (node
->load_permutation
.exists ())
1351 FOR_EACH_VEC_ELT (node
->load_permutation
, j
, next
)
1352 dump_printf (MSG_NOTE
, "%d ", next
);
1354 for (k
= 0; k
< group_size
; ++k
)
1355 dump_printf (MSG_NOTE
, "%d ", k
);
1356 dump_printf (MSG_NOTE
, "\n");
1359 /* In case of reduction every load permutation is allowed, since the order
1360 of the reduction statements is not important (as opposed to the case of
1361 grouped stores). The only condition we need to check is that all the
1362 load nodes are of the same size and have the same permutation (and then
1363 rearrange all the nodes of the SLP instance according to this
1366 /* Check that all the load nodes are of the same size. */
1367 /* ??? Can't we assert this? */
1368 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1369 if (SLP_TREE_SCALAR_STMTS (node
).length () != (unsigned) group_size
)
1372 node
= SLP_INSTANCE_TREE (slp_instn
);
1373 stmt
= SLP_TREE_SCALAR_STMTS (node
)[0];
1375 /* Reduction (there are no data-refs in the root).
1376 In reduction chain the order of the loads is not important. */
1377 if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
))
1378 && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
1380 if (vect_attempt_slp_rearrange_stmts (slp_instn
))
1383 /* Fallthru to general load permutation handling. */
1386 /* In basic block vectorization we allow any subchain of an interleaving
1388 FORNOW: not supported in loop SLP because of realignment compications. */
1389 if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt
)))
1391 /* Check whether the loads in an instance form a subchain and thus
1392 no permutation is necessary. */
1393 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1395 if (!SLP_TREE_LOAD_PERMUTATION (node
).exists ())
1397 bool subchain_p
= true;
1399 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), j
, load
)
1402 && (next_load
!= load
1403 || GROUP_GAP (vinfo_for_stmt (load
)) != 1))
1408 next_load
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (load
));
1411 SLP_TREE_LOAD_PERMUTATION (node
).release ();
1414 /* Verify the permutation can be generated. */
1416 if (!vect_transform_slp_perm_load (node
, tem
, NULL
,
1417 1, slp_instn
, true))
1419 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
1421 "unsupported load permutation\n");
1427 /* Check that the alignment of the first load in every subchain, i.e.,
1428 the first statement in every load node, is supported.
1429 ??? This belongs in alignment checking. */
1430 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1432 first_load
= SLP_TREE_SCALAR_STMTS (node
)[0];
1433 if (first_load
!= GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load
)))
1435 dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load
));
1436 if (vect_supportable_dr_alignment (dr
, false)
1437 == dr_unaligned_unsupported
)
1439 if (dump_enabled_p ())
1441 dump_printf_loc (MSG_MISSED_OPTIMIZATION
,
1443 "unsupported unaligned load ");
1444 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
1446 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
1456 /* For loop vectorization verify we can generate the permutation. */
1457 FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn
), i
, node
)
1458 if (node
->load_permutation
.exists ()
1459 && !vect_transform_slp_perm_load
1461 SLP_INSTANCE_UNROLLING_FACTOR (slp_instn
), slp_instn
, true))
/* Find the last store in SLP INSTANCE.  */

static gimple
vect_find_last_scalar_stmt_in_slp (slp_tree node)
{
  gimple last = NULL, stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
        last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
      else
        last = get_later_stmt (stmt, last);
    }

  return last;
}
1487 /* Compute the cost for the SLP node NODE in the SLP instance INSTANCE. */
1490 vect_analyze_slp_cost_1 (slp_instance instance
, slp_tree node
,
1491 stmt_vector_for_cost
*prologue_cost_vec
,
1492 stmt_vector_for_cost
*body_cost_vec
,
1493 unsigned ncopies_for_cost
)
1498 stmt_vec_info stmt_info
;
1500 unsigned group_size
= SLP_INSTANCE_GROUP_SIZE (instance
);
1502 /* Recurse down the SLP tree. */
1503 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), i
, child
)
1505 vect_analyze_slp_cost_1 (instance
, child
, prologue_cost_vec
,
1506 body_cost_vec
, ncopies_for_cost
);
1508 /* Look at the first scalar stmt to determine the cost. */
1509 stmt
= SLP_TREE_SCALAR_STMTS (node
)[0];
1510 stmt_info
= vinfo_for_stmt (stmt
);
1511 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1513 if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info
)))
1514 vect_model_store_cost (stmt_info
, ncopies_for_cost
, false,
1515 vect_uninitialized_def
,
1516 node
, prologue_cost_vec
, body_cost_vec
);
1520 gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info
)));
1521 vect_model_load_cost (stmt_info
, ncopies_for_cost
, false,
1522 node
, prologue_cost_vec
, body_cost_vec
);
1523 /* If the load is permuted record the cost for the permutation.
1524 ??? Loads from multiple chains are let through here only
1525 for a single special case involving complex numbers where
1526 in the end no permutation is necessary. */
1527 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node
), i
, s
)
1528 if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s
))
1529 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info
))
1530 && vect_get_place_in_interleaving_chain
1531 (s
, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info
)) != i
)
1533 record_stmt_cost (body_cost_vec
, group_size
, vec_perm
,
1534 stmt_info
, 0, vect_body
);
1541 record_stmt_cost (body_cost_vec
, ncopies_for_cost
, vector_stmt
,
1542 stmt_info
, 0, vect_body
);
1543 if (SLP_TREE_TWO_OPERATORS (node
))
1545 record_stmt_cost (body_cost_vec
, ncopies_for_cost
, vector_stmt
,
1546 stmt_info
, 0, vect_body
);
1547 record_stmt_cost (body_cost_vec
, ncopies_for_cost
, vec_perm
,
1548 stmt_info
, 0, vect_body
);
1552 /* Scan operands and account for prologue cost of constants/externals.
1553 ??? This over-estimates cost for multiple uses and should be
1555 lhs
= gimple_get_lhs (stmt
);
1556 for (i
= 0; i
< gimple_num_ops (stmt
); ++i
)
1558 tree def
, op
= gimple_op (stmt
, i
);
1560 enum vect_def_type dt
;
1561 if (!op
|| op
== lhs
)
1563 if (vect_is_simple_use (op
, NULL
, STMT_VINFO_LOOP_VINFO (stmt_info
),
1564 STMT_VINFO_BB_VINFO (stmt_info
),
1565 &def_stmt
, &def
, &dt
))
1567 /* Without looking at the actual initializer a vector of
1568 constants can be implemented as load from the constant pool.
1569 ??? We need to pass down stmt_info for a vector type
1570 even if it points to the wrong stmt. */
1571 if (dt
== vect_constant_def
)
1572 record_stmt_cost (prologue_cost_vec
, 1, vector_load
,
1573 stmt_info
, 0, vect_prologue
);
1574 else if (dt
== vect_external_def
)
1575 record_stmt_cost (prologue_cost_vec
, 1, vec_construct
,
1576 stmt_info
, 0, vect_prologue
);
1581 /* Compute the cost for the SLP instance INSTANCE. */
1584 vect_analyze_slp_cost (slp_instance instance
, void *data
)
1586 stmt_vector_for_cost body_cost_vec
, prologue_cost_vec
;
1587 unsigned ncopies_for_cost
;
1588 stmt_info_for_cost
*si
;
1591 /* Calculate the number of vector stmts to create based on the unrolling
1592 factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
1593 GROUP_SIZE / NUNITS otherwise. */
1594 unsigned group_size
= SLP_INSTANCE_GROUP_SIZE (instance
);
1595 slp_tree node
= SLP_INSTANCE_TREE (instance
);
1596 stmt_vec_info stmt_info
= vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node
)[0]);
1597 /* Adjust the group_size by the vectorization factor which is always one
1598 for basic-block vectorization. */
1599 if (STMT_VINFO_LOOP_VINFO (stmt_info
))
1600 group_size
*= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info
));
1601 unsigned nunits
= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info
));
1602 /* For reductions look at a reduction operand in case the reduction
1603 operation is widening like DOT_PROD or SAD. */
1604 if (!STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1606 gimple stmt
= SLP_TREE_SCALAR_STMTS (node
)[0];
1607 switch (gimple_assign_rhs_code (stmt
))
1611 nunits
= TYPE_VECTOR_SUBPARTS (get_vectype_for_scalar_type
1612 (TREE_TYPE (gimple_assign_rhs1 (stmt
))));
1617 ncopies_for_cost
= least_common_multiple (nunits
, group_size
) / nunits
;
1619 prologue_cost_vec
.create (10);
1620 body_cost_vec
.create (10);
1621 vect_analyze_slp_cost_1 (instance
, SLP_INSTANCE_TREE (instance
),
1622 &prologue_cost_vec
, &body_cost_vec
,
1625 /* Record the prologue costs, which were delayed until we were
1626 sure that SLP was successful. */
1627 FOR_EACH_VEC_ELT (prologue_cost_vec
, i
, si
)
1629 struct _stmt_vec_info
*stmt_info
1630 = si
->stmt
? vinfo_for_stmt (si
->stmt
) : NULL
;
1631 (void) add_stmt_cost (data
, si
->count
, si
->kind
, stmt_info
,
1632 si
->misalign
, vect_prologue
);
1635 /* Record the instance's instructions in the target cost model. */
1636 FOR_EACH_VEC_ELT (body_cost_vec
, i
, si
)
1638 struct _stmt_vec_info
*stmt_info
1639 = si
->stmt
? vinfo_for_stmt (si
->stmt
) : NULL
;
1640 (void) add_stmt_cost (data
, si
->count
, si
->kind
, stmt_info
,
1641 si
->misalign
, vect_body
);
1644 prologue_cost_vec
.release ();
1645 body_cost_vec
.release ();
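
/* Worked example (illustrative, not from the original sources) for the
   ncopies_for_cost computation above, with 4-lane vectors (nunits == 4):

     group_size 8  ->  least_common_multiple (4, 8) / 4 == 2 vector stmts
     group_size 4  ->  least_common_multiple (4, 4) / 4 == 1 vector stmt
     group_size 2  ->  least_common_multiple (4, 2) / 4 == 1 vector stmt,
                       covering two unrolled copies of the group

   For loop SLP the group size is first multiplied by the vectorization
   factor, as done above, so the cost reflects all copies generated for one
   vectorized loop iteration.  */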
1648 /* Analyze an SLP instance starting from a group of grouped stores. Call
1649 vect_build_slp_tree to build a tree of packed stmts if possible.
1650 Return FALSE if it's impossible to SLP any stmt in the loop. */
1653 vect_analyze_slp_instance (loop_vec_info loop_vinfo
, bb_vec_info bb_vinfo
,
1654 gimple stmt
, unsigned max_tree_size
)
1656 slp_instance new_instance
;
1658 unsigned int group_size
= GROUP_SIZE (vinfo_for_stmt (stmt
));
1659 unsigned int unrolling_factor
= 1, nunits
;
1660 tree vectype
, scalar_type
= NULL_TREE
;
1662 unsigned int vectorization_factor
= 0;
1664 unsigned int max_nunits
= 0;
1665 vec
<slp_tree
> loads
;
1666 struct data_reference
*dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt
));
1667 vec
<gimple
> scalar_stmts
;
1669 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
1673 scalar_type
= TREE_TYPE (DR_REF (dr
));
1674 vectype
= get_vectype_for_scalar_type (scalar_type
);
1678 gcc_assert (loop_vinfo
);
1679 vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
1682 group_size
= GROUP_SIZE (vinfo_for_stmt (stmt
));
1686 gcc_assert (loop_vinfo
);
1687 vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt
));
1688 group_size
= LOOP_VINFO_REDUCTIONS (loop_vinfo
).length ();
1693 if (dump_enabled_p ())
1695 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1696 "Build SLP failed: unsupported data-type ");
1697 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, scalar_type
);
1698 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
1704 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1706 vectorization_factor
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
1708 vectorization_factor
= nunits
;
1710 /* Calculate the unrolling factor. */
1711 unrolling_factor
= least_common_multiple (nunits
, group_size
) / group_size
;
1712 if (unrolling_factor
!= 1 && !loop_vinfo
)
1714 if (dump_enabled_p ())
1715 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1716 "Build SLP failed: unrolling required in basic"
1722 /* Create a node (a root of the SLP tree) for the packed grouped stores. */
1723 scalar_stmts
.create (group_size
);
1725 if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt
)))
1727 /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS. */
1730 if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next
))
1731 && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next
)))
1732 scalar_stmts
.safe_push (
1733 STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next
)));
1735 scalar_stmts
.safe_push (next
);
1736 next
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next
));
1738 /* Mark the first element of the reduction chain as reduction to properly
1739 transform the node. In the reduction analysis phase only the last
1740 element of the chain is marked as reduction. */
1741 if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt
)))
1742 STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt
)) = vect_reduction_def
;
1746 /* Collect reduction statements. */
1747 vec
<gimple
> reductions
= LOOP_VINFO_REDUCTIONS (loop_vinfo
);
1748 for (i
= 0; reductions
.iterate (i
, &next
); i
++)
1749 scalar_stmts
.safe_push (next
);
1752 node
= vect_create_new_slp_node (scalar_stmts
);
1754 loads
.create (group_size
);
1756 /* Build the tree for the SLP instance. */
1757 bool *matches
= XALLOCAVEC (bool, group_size
);
1758 unsigned npermutes
= 0;
1759 if (vect_build_slp_tree (loop_vinfo
, bb_vinfo
, &node
, group_size
,
1760 &max_nunits
, &loads
,
1761 vectorization_factor
, matches
, &npermutes
, NULL
,
1764 /* Calculate the unrolling factor based on the smallest type. */
1765 if (max_nunits
> nunits
)
1766 unrolling_factor
= least_common_multiple (max_nunits
, group_size
)
1769 if (unrolling_factor
!= 1 && !loop_vinfo
)
1771 if (dump_enabled_p ())
1772 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1773 "Build SLP failed: unrolling required in basic"
1775 vect_free_slp_tree (node
);
1780 /* Create a new SLP instance. */
1781 new_instance
= XNEW (struct _slp_instance
);
1782 SLP_INSTANCE_TREE (new_instance
) = node
;
1783 SLP_INSTANCE_GROUP_SIZE (new_instance
) = group_size
;
1784 SLP_INSTANCE_UNROLLING_FACTOR (new_instance
) = unrolling_factor
;
1785 SLP_INSTANCE_LOADS (new_instance
) = loads
;
1787 /* Compute the load permutation. */
1789 bool loads_permuted
= false;
1790 FOR_EACH_VEC_ELT (loads
, i
, load_node
)
1792 vec
<unsigned> load_permutation
;
1794 gimple load
, first_stmt
;
1795 bool this_load_permuted
= false;
1796 load_permutation
.create (group_size
);
1797 first_stmt
= GROUP_FIRST_ELEMENT
1798 (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node
)[0]));
1799 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node
), j
, load
)
1802 = vect_get_place_in_interleaving_chain (load
, first_stmt
);
1803 gcc_assert (load_place
!= -1);
1804 if (load_place
!= j
)
1805 this_load_permuted
= true;
1806 load_permutation
.safe_push (load_place
);
1808 if (!this_load_permuted
1809 /* The load requires permutation when unrolling exposes
1810 a gap either because the group is larger than the SLP
1811 group-size or because there is a gap between the groups. */
1812 && (unrolling_factor
== 1
1813 || (group_size
== GROUP_SIZE (vinfo_for_stmt (first_stmt
))
1814 && GROUP_GAP (vinfo_for_stmt (first_stmt
)) == 0)))
1816 load_permutation
.release ();
1819 SLP_TREE_LOAD_PERMUTATION (load_node
) = load_permutation
;
1820 loads_permuted
= true;
1825 if (!vect_supported_load_permutation_p (new_instance
))
1827 if (dump_enabled_p ())
1829 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1830 "Build SLP failed: unsupported load "
1832 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
1833 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
1835 vect_free_slp_instance (new_instance
);
1842 LOOP_VINFO_SLP_INSTANCES (loop_vinfo
).safe_push (new_instance
);
1844 BB_VINFO_SLP_INSTANCES (bb_vinfo
).safe_push (new_instance
);
1846 if (dump_enabled_p ())
1847 vect_print_slp_tree (MSG_NOTE
, node
);
1852 /* Failed to SLP. */
1853 /* Free the allocated memory. */
1854 vect_free_slp_tree (node
);
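
/* Worked example (illustrative, not from the original sources) for the
   unrolling factor used above: a store group of two elements with 4-lane
   vectors gives

     unrolling_factor = least_common_multiple (4, 2) / 2 == 2

   i.e. two copies of the group are needed to fill one vector.  That is
   fine for loop SLP, where the loop provides the extra copies, but it is
   rejected for basic-block SLP, where no unrolling is possible.  */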
1861 /* Check if there are stmts in the loop can be vectorized using SLP. Build SLP
1862 trees of packed scalar stmts if SLP is possible. */
1865 vect_analyze_slp (loop_vec_info loop_vinfo
, bb_vec_info bb_vinfo
,
1866 unsigned max_tree_size
)
1869 vec
<gimple
> grouped_stores
;
1870 vec
<gimple
> reductions
= vNULL
;
1871 vec
<gimple
> reduc_chains
= vNULL
;
1872 gimple first_element
;
1875 if (dump_enabled_p ())
1876 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vect_analyze_slp ===\n");
1880 grouped_stores
= LOOP_VINFO_GROUPED_STORES (loop_vinfo
);
1881 reduc_chains
= LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo
);
1882 reductions
= LOOP_VINFO_REDUCTIONS (loop_vinfo
);
1885 grouped_stores
= BB_VINFO_GROUPED_STORES (bb_vinfo
);
1887 /* Find SLP sequences starting from groups of grouped stores. */
1888 FOR_EACH_VEC_ELT (grouped_stores
, i
, first_element
)
1889 if (vect_analyze_slp_instance (loop_vinfo
, bb_vinfo
, first_element
,
1893 if (reduc_chains
.length () > 0)
1895 /* Find SLP sequences starting from reduction chains. */
1896 FOR_EACH_VEC_ELT (reduc_chains
, i
, first_element
)
1897 if (vect_analyze_slp_instance (loop_vinfo
, bb_vinfo
, first_element
,
1903 /* Don't try to vectorize SLP reductions if reduction chain was
1908 /* Find SLP sequences starting from groups of reductions. */
1909 if (reductions
.length () > 1
1910 && vect_analyze_slp_instance (loop_vinfo
, bb_vinfo
, reductions
[0],
1918 /* For each possible SLP instance decide whether to SLP it and calculate overall
1919 unrolling factor needed to SLP the loop. Return TRUE if decided to SLP at
1920 least one instance. */
1923 vect_make_slp_decision (loop_vec_info loop_vinfo
)
1925 unsigned int i
, unrolling_factor
= 1;
1926 vec
<slp_instance
> slp_instances
= LOOP_VINFO_SLP_INSTANCES (loop_vinfo
);
1927 slp_instance instance
;
1928 int decided_to_slp
= 0;
1930 if (dump_enabled_p ())
1931 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vect_make_slp_decision ==="
1934 FOR_EACH_VEC_ELT (slp_instances
, i
, instance
)
1936 /* FORNOW: SLP if you can. */
1937 if (unrolling_factor
< SLP_INSTANCE_UNROLLING_FACTOR (instance
))
1938 unrolling_factor
= SLP_INSTANCE_UNROLLING_FACTOR (instance
);
1940 /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts. Later we
1941 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
1942 loop-based vectorization. Such stmts will be marked as HYBRID. */
1943 vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance
), pure_slp
, -1);
1947 LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo
) = unrolling_factor
;
1949 if (decided_to_slp
&& dump_enabled_p ())
1950 dump_printf_loc (MSG_NOTE
, vect_location
,
1951 "Decided to SLP %d instances. Unrolling factor %d\n",
1952 decided_to_slp
, unrolling_factor
);
1954 return (decided_to_slp
> 0);
1958 /* Find stmts that must be both vectorized and SLPed (since they feed stmts that
1959 can't be SLPed) in the tree rooted at NODE. Mark such stmts as HYBRID. */
1962 vect_detect_hybrid_slp_stmts (slp_tree node
, unsigned i
, slp_vect_type stype
)
1964 gimple stmt
= SLP_TREE_SCALAR_STMTS (node
)[i
];
1965 imm_use_iterator imm_iter
;
1967 stmt_vec_info use_vinfo
, stmt_vinfo
= vinfo_for_stmt (stmt
);
1969 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_vinfo
);
1970 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1973 /* Propagate hybrid down the SLP tree. */
1974 if (stype
== hybrid
)
1976 else if (HYBRID_SLP_STMT (stmt_vinfo
))
1980 /* Check if a pure SLP stmt has uses in non-SLP stmts. */
1981 gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo
));
1982 /* We always get the pattern stmt here, but for immediate
1983 uses we have to use the LHS of the original stmt. */
1984 gcc_checking_assert (!STMT_VINFO_IN_PATTERN_P (stmt_vinfo
));
1985 if (STMT_VINFO_RELATED_STMT (stmt_vinfo
))
1986 stmt
= STMT_VINFO_RELATED_STMT (stmt_vinfo
);
1987 if (TREE_CODE (gimple_op (stmt
, 0)) == SSA_NAME
)
1988 FOR_EACH_IMM_USE_STMT (use_stmt
, imm_iter
, gimple_op (stmt
, 0))
1990 if (!flow_bb_inside_loop_p (loop
, gimple_bb (use_stmt
)))
1992 use_vinfo
= vinfo_for_stmt (use_stmt
);
1993 if (STMT_VINFO_IN_PATTERN_P (use_vinfo
)
1994 && STMT_VINFO_RELATED_STMT (use_vinfo
))
1995 use_vinfo
= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (use_vinfo
));
1996 if (!STMT_SLP_TYPE (use_vinfo
)
1997 && (STMT_VINFO_RELEVANT (use_vinfo
)
1998 || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo
)))
1999 && !(gimple_code (use_stmt
) == GIMPLE_PHI
2000 && STMT_VINFO_DEF_TYPE (use_vinfo
) == vect_reduction_def
))
2002 if (dump_enabled_p ())
2004 dump_printf_loc (MSG_NOTE
, vect_location
, "use of SLP "
2005 "def in non-SLP stmt: ");
2006 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, use_stmt
, 0);
2014 && !HYBRID_SLP_STMT (stmt_vinfo
))
2016 if (dump_enabled_p ())
2018 dump_printf_loc (MSG_NOTE
, vect_location
, "marking hybrid: ");
2019 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
2021 STMT_SLP_TYPE (stmt_vinfo
) = hybrid
;
2024 FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node
), j
, child
)
2026 vect_detect_hybrid_slp_stmts (child
, i
, stype
);
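
/* Illustrative sketch (not from the original sources) of a hybrid stmt
   detected above: if an SLP def is also consumed by a statement that is
   not part of any SLP instance, e.g.

     for (i = 0; i < n; i++)
       {
         t = b[2*i] + c[2*i];
         a[2*i] = t;            <-- SLP group of stores
         a[2*i+1] = b[2*i+1];
         s += t;                <-- non-SLP use (loop-vectorized reduction)
       }

   then the def of T must be available both as an SLP vector lane and as a
   loop-vectorized value, so its statement is marked hybrid and the mark is
   propagated down the SLP tree.  */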
2029 /* Helpers for vect_detect_hybrid_slp walking pattern stmt uses. */
2032 vect_detect_hybrid_slp_1 (tree
*tp
, int *, void *data
)
2034 walk_stmt_info
*wi
= (walk_stmt_info
*)data
;
2035 struct loop
*loopp
= (struct loop
*)wi
->info
;
2040 if (TREE_CODE (*tp
) == SSA_NAME
2041 && !SSA_NAME_IS_DEFAULT_DEF (*tp
))
2043 gimple def_stmt
= SSA_NAME_DEF_STMT (*tp
);
2044 if (flow_bb_inside_loop_p (loopp
, gimple_bb (def_stmt
))
2045 && PURE_SLP_STMT (vinfo_for_stmt (def_stmt
)))
2047 if (dump_enabled_p ())
2049 dump_printf_loc (MSG_NOTE
, vect_location
, "marking hybrid: ");
2050 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, def_stmt
, 0);
2052 STMT_SLP_TYPE (vinfo_for_stmt (def_stmt
)) = hybrid
;
2060 vect_detect_hybrid_slp_2 (gimple_stmt_iterator
*gsi
, bool *handled
,
2063 /* If the stmt is in a SLP instance then this isn't a reason
2064 to mark use definitions in other SLP instances as hybrid. */
2065 if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi
))) != loop_vect
)
2070 /* Find stmts that must be both vectorized and SLPed. */
2073 vect_detect_hybrid_slp (loop_vec_info loop_vinfo
)
2076 vec
<slp_instance
> slp_instances
= LOOP_VINFO_SLP_INSTANCES (loop_vinfo
);
2077 slp_instance instance
;
2079 if (dump_enabled_p ())
2080 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vect_detect_hybrid_slp ==="
2083 /* First walk all pattern stmt in the loop and mark defs of uses as
2084 hybrid because immediate uses in them are not recorded. */
2085 for (i
= 0; i
< LOOP_VINFO_LOOP (loop_vinfo
)->num_nodes
; ++i
)
2087 basic_block bb
= LOOP_VINFO_BBS (loop_vinfo
)[i
];
2088 for (gimple_stmt_iterator gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
);
2091 gimple stmt
= gsi_stmt (gsi
);
2092 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2093 if (STMT_VINFO_IN_PATTERN_P (stmt_info
))
2096 memset (&wi
, 0, sizeof (wi
));
2097 wi
.info
= LOOP_VINFO_LOOP (loop_vinfo
);
2098 gimple_stmt_iterator gsi2
2099 = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
2100 walk_gimple_stmt (&gsi2
, vect_detect_hybrid_slp_2
,
2101 vect_detect_hybrid_slp_1
, &wi
);
2102 walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
),
2103 vect_detect_hybrid_slp_2
,
2104 vect_detect_hybrid_slp_1
, &wi
);
2109 /* Then walk the SLP instance trees marking stmts with uses in
2110 non-SLP stmts as hybrid, also propagating hybrid down the
2111 SLP tree, collecting the above info on-the-fly. */
2112 FOR_EACH_VEC_ELT (slp_instances
, i
, instance
)
2114 for (unsigned i
= 0; i
< SLP_INSTANCE_GROUP_SIZE (instance
); ++i
)
2115 vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance
),
2121 /* Create and initialize a new bb_vec_info struct for BB, as well as
2122 stmt_vec_info structs for all the stmts in it. */
2125 new_bb_vec_info (basic_block bb
)
2127 bb_vec_info res
= NULL
;
2128 gimple_stmt_iterator gsi
;
2130 res
= (bb_vec_info
) xcalloc (1, sizeof (struct _bb_vec_info
));
2131 BB_VINFO_BB (res
) = bb
;
2133 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
2135 gimple stmt
= gsi_stmt (gsi
);
2136 gimple_set_uid (stmt
, 0);
2137 set_vinfo_for_stmt (stmt
, new_stmt_vec_info (stmt
, NULL
, res
));
2140 BB_VINFO_GROUPED_STORES (res
).create (10);
2141 BB_VINFO_SLP_INSTANCES (res
).create (2);
2142 BB_VINFO_TARGET_COST_DATA (res
) = init_cost (NULL
);
/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
	/* Free stmt_vec_info.  */
	free_stmt_vec_info (stmt);
    }

  vect_destroy_datarefs (NULL, bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));

  free (bb_vinfo);
  bb->aux = NULL;
}

/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;
  slp_tree child;

  if (!node)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (child))
      return false;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);

      if (!vect_analyze_stmt (stmt, &dummy, node))
	return false;
    }

  return true;
}

/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (vec<slp_instance> slp_instances, void *data)
{
  slp_instance instance;
  int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_slp_analyze_operations ===\n");

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "removing SLP instance operations starting from: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
			    SLP_TREE_SCALAR_STMTS
			      (SLP_INSTANCE_TREE (instance))[0], 0);
	  vect_free_slp_instance (instance);
	  slp_instances.ordered_remove (i);
	}
      else
	{
	  /* Compute the costs of the SLP instance.  */
	  vect_analyze_slp_cost (instance, data);
	  i++;
	}
    }

  if (!slp_instances.length ())
    return false;

  return true;
}

/* Compute the scalar cost of the SLP node NODE and its children
   and return it.  Do not account defs that are marked in LIFE and
   update LIFE according to uses of NODE.  */

static unsigned
vect_bb_slp_scalar_cost (basic_block bb,
			 slp_tree node, vec<bool, va_heap> *life)
{
  unsigned scalar_cost = 0;
  unsigned i;
  gimple stmt;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      unsigned stmt_cost;
      ssa_op_iter op_iter;
      def_operand_p def_p;
      stmt_vec_info stmt_info;

      if ((*life)[i])
	continue;

      /* If there is a non-vectorized use of the defs then the scalar
	 stmt is kept live in which case we do not account it or any
	 required defs in the SLP children in the scalar cost.  This
	 way we make the vectorization more costly when compared to
	 the scalar cost.  */
      FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
	{
	  imm_use_iterator use_iter;
	  gimple use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
	    if (!is_gimple_debug (use_stmt)
		&& (gimple_code (use_stmt) == GIMPLE_PHI
		    || gimple_bb (use_stmt) != bb
		    || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
	      {
		(*life)[i] = true;
		BREAK_FROM_IMM_USE_STMT (use_iter);
	      }
	}
      if ((*life)[i])
	continue;

      stmt_info = vinfo_for_stmt (stmt);
      if (STMT_VINFO_DATA_REF (stmt_info))
	{
	  if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
	    stmt_cost = vect_get_stmt_cost (scalar_load);
	  else
	    stmt_cost = vect_get_stmt_cost (scalar_store);
	}
      else
	stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);

  return scalar_cost;
}

/* Check if vectorization of the basic block is profitable.  */

static bool
vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;
  unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
  unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;

  /* Calculate scalar cost.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      auto_vec<bool, 20> life;
      life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
      scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
					      SLP_INSTANCE_TREE (instance),
					      &life);
    }

  /* Complete the target-specific cost calculation.  */
  finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
	       &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of basic block cost: %d\n",
		   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n", vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n", vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar cost of basic block: %d\n", scalar_cost);
    }
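
  /* For instance, with vec_inside_cost == 6, vec_prologue_cost == 1 and
     vec_epilogue_cost == 1 the total vector cost is 8; if the scalar cost
     of the block is 12, 8 < 12 and vectorization is deemed profitable.
     (Hypothetical cost breakdown, purely for illustration.)  */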
  /* Vectorization is profitable if its cost is less than the cost of scalar
     version.  */
  if (vec_outside_cost + vec_inside_cost >= scalar_cost)
    return false;

  return true;
}

/* Check if the basic block can be vectorized.  */

static bb_vec_info
vect_slp_analyze_bb_1 (basic_block bb)
{
  bb_vec_info bb_vinfo;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  int i;
  int min_vf = 2;
  unsigned n_stmts = 0;

  bb_vinfo = new_bb_vec_info (bb);
  if (!bb_vinfo)
    return NULL;

  if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf, &n_stmts))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unhandled data-ref in basic "
			 "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: not enough data-refs in "
			 "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unhandled data access in "
			 "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  vect_pattern_recog (NULL, bb_vinfo);

  if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: bad data alignment in basic "
			 "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (NULL, bb_vinfo, n_stmts))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Failed to SLP the basic block.\n");
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: failed to find SLP opportunities "
			   "in basic block.\n");
	}

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

  /* Mark all the statements that we want to vectorize as pure SLP and
     relevant.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
    }

  /* Mark all the statements that we do not want to vectorize.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
       !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
      if (STMT_SLP_TYPE (vinfo) != pure_slp)
	STMT_VINFO_VECTORIZABLE (vinfo) = false;
    }

  /* Analyze dependences.  At this point all stmts not participating in
     vectorization have to be marked.  Dependence analysis assumes
     that we either vectorize all SLP instances or none at all.  */
  if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unhandled data dependence "
			 "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported alignment in basic "
			 "block.\n");
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo),
				    BB_VINFO_TARGET_COST_DATA (bb_vinfo)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: bad operation in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Cost model: check if the vectorization is worthwhile.  */
  if (!unlimited_cost_model (NULL)
      && !vect_bb_vectorization_profitable_p (bb_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: vectorization is not "
			 "profitable.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "Basic block will be vectorized using SLP\n");

  return bb_vinfo;
}

bb_vec_info
vect_slp_analyze_bb (basic_block bb)
{
  bb_vec_info bb_vinfo;
  int insns = 0;
  gimple_stmt_iterator gsi;
  unsigned int vector_sizes;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt)
	  && !gimple_nop_p (stmt)
	  && gimple_code (stmt) != GIMPLE_LABEL)
	insns++;
    }

  if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: too many instructions in "
			 "basic block.\n");

      return NULL;
    }

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
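
  /* For example, if the target advertises 16-byte and 8-byte vectors,
     VECTOR_SIZES is the bit mask 16 | 8 == 24.  After a failed attempt with
     the autodetected (16-byte) size, the retry loop below clears that bit
     and tries again with 1 << floor_log2 (8) == 8.  (Illustrative sizes
     only; the actual set is target-dependent.)  */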
  while (1)
    {
      bb_vinfo = vect_slp_analyze_bb_1 (bb);
      if (bb_vinfo)
	return bb_vinfo;

      destroy_bb_vec_info (bb_vinfo);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
	  || current_vector_size == 0)
	return NULL;

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "***** Re-trying analysis with "
			 "vector size %d\n", current_vector_size);
    }
}

/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
   scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

static void
vect_get_constant_vectors (tree op, slp_tree slp_node,
			   vec<tree> *vec_oprnds,
			   unsigned int op_num, unsigned int number_of_vectors,
			   int reduc_index)
{
  vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned nunits;
  tree vec_cst;
  tree *elts;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;
  enum tree_code code = gimple_expr_code (stmt);
  gimple def_stmt;
  struct loop *loop;
  gimple_seq ctor_seq = NULL;

  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && reduc_index != -1)
    {
      op_num = reduc_index;
      op = gimple_op (stmt, op_num + 1);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
	 we need either neutral operands or the original operands.  See
	 get_initial_def_for_reduction() for details.  */
      switch (code)
	{
	  case WIDEN_SUM_EXPR:
	  case DOT_PROD_EXPR:
	  case SAD_EXPR:
	  case PLUS_EXPR:
	  case MINUS_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	    if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
	      neutral_op = build_real (TREE_TYPE (op), dconst0);
	    else
	      neutral_op = build_int_cst (TREE_TYPE (op), 0);

	    break;

	  case MULT_EXPR:
	    if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
	      neutral_op = build_real (TREE_TYPE (op), dconst1);
	    else
	      neutral_op = build_int_cst (TREE_TYPE (op), 1);

	    break;

	  case BIT_AND_EXPR:
	    neutral_op = build_int_cst (TREE_TYPE (op), -1);
	    break;

	  /* For MIN/MAX we don't have an easy neutral operand but
	     the initial values can be used fine here.  Only for
	     a reduction chain we have to force a neutral element.  */
	  case MAX_EXPR:
	  case MIN_EXPR:
	    if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
	      neutral_op = NULL;
	    else
	      {
		def_stmt = SSA_NAME_DEF_STMT (op);
		loop = (gimple_bb (stmt))->loop_father;
		neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
						    loop_preheader_edge (loop));
	      }
	    break;

	  default:
	    gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
	    neutral_op = NULL;
	}
    }

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      is_store = true;
      op = gimple_assign_rhs1 (stmt);
    }
  else
    is_store = false;

  gcc_assert (op);

  if (CONSTANT_CLASS_P (op))
    constant_p = true;
  else
    constant_p = false;
  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = nunits * number_of_vectors / group_size;
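
  /* Worked instance of the formula above: with the first example
     (GROUP_SIZE == 2, NUNITS == 4) and a single vector to create,
     NUMBER_OF_COPIES == 4 * 1 / 2 == 2, giving {s1, s2, s1, s2}.  With the
     second example (GROUP_SIZE == 8, NUNITS == 4) and two vectors,
     NUMBER_OF_COPIES == 4 * 2 / 8 == 1.  */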
  number_of_places_left_in_vector = nunits;
  elts = XALLOCAVEC (tree, nunits);
  bool place_after_defs = false;
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
	{
	  if (is_store)
	    op = gimple_assign_rhs1 (stmt);
	  else
	    {
	      switch (code)
		{
		  case COND_EXPR:
		    if (op_num == 0 || op_num == 1)
		      {
			tree cond = gimple_assign_rhs1 (stmt);
			op = TREE_OPERAND (cond, op_num);
		      }
		    else
		      {
			if (op_num == 2)
			  op = gimple_assign_rhs2 (stmt);
			else
			  op = gimple_assign_rhs3 (stmt);
		      }
		    break;

		  case CALL_EXPR:
		    op = gimple_call_arg (stmt, op_num);
		    break;

		  case LSHIFT_EXPR:
		  case RSHIFT_EXPR:
		  case LROTATE_EXPR:
		  case RROTATE_EXPR:
		    op = gimple_op (stmt, op_num + 1);
		    /* Unlike the other binary operators, shifts/rotates have
		       the shift count being int, instead of the same type as
		       the lhs, so make sure the scalar is the right type if
		       we are dealing with vectors of
		       long long/long/short/char.  */
		    if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
		      op = fold_convert (TREE_TYPE (vector_type), op);
		    break;

		  default:
		    op = gimple_op (stmt, op_num + 1);
		    break;
		}
	    }
	  if (reduc_index != -1)
	    {
	      loop = (gimple_bb (stmt))->loop_father;
	      def_stmt = SSA_NAME_DEF_STMT (op);

	      gcc_assert (loop);

	      /* Get the def before the loop.  In reduction chain we have only
		 one initial value.  */
	      if ((j != (number_of_copies - 1)
		   || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
		       && i != 0))
		  && neutral_op)
		op = neutral_op;
	      else
		op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
					    loop_preheader_edge (loop));
	    }
	  /* Create 'vect_ = {op0,op1,...,opn}'.  */
	  number_of_places_left_in_vector--;
	  tree orig_op = op;
	  if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
	    {
	      if (CONSTANT_CLASS_P (op))
		{
		  op = fold_unary (VIEW_CONVERT_EXPR,
				   TREE_TYPE (vector_type), op);
		  gcc_assert (op && CONSTANT_CLASS_P (op));
		}
	      else
		{
		  tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
		  gimple init_stmt;
		  op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
		  init_stmt
		    = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
		  gimple_seq_add_stmt (&ctor_seq, init_stmt);
		  op = new_temp;
		}
	    }
	  elts[number_of_places_left_in_vector] = op;
	  if (!CONSTANT_CLASS_P (op))
	    constant_p = false;
	  if (TREE_CODE (orig_op) == SSA_NAME
	      && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
	      && STMT_VINFO_BB_VINFO (stmt_vinfo)
	      && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
		  == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
	    place_after_defs = true;
	  if (number_of_places_left_in_vector == 0)
	    {
	      number_of_places_left_in_vector = nunits;

	      if (constant_p)
		vec_cst = build_vector (vector_type, elts);
	      else
		{
		  vec<constructor_elt, va_gc> *v;
		  unsigned k;
		  vec_alloc (v, nunits);
		  for (k = 0; k < nunits; ++k)
		    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
		  vec_cst = build_constructor (vector_type, v);
		}
	      tree init;
	      gimple_stmt_iterator gsi;
	      if (place_after_defs)
		{
		  gsi = gsi_for_stmt
			  (vect_find_last_scalar_stmt_in_slp (slp_node));
		  init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
		}
	      else
		init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
	      if (ctor_seq != NULL)
		{
		  gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
		  gsi_insert_seq_before_without_update (&gsi, ctor_seq,
							GSI_SAME_STMT);
		  ctor_seq = NULL;
		}
	      voprnds.quick_push (init);
	      place_after_defs = false;
	    }
	}
    }
  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();
  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
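  /* For instance, if four vector defs were requested but only two were
     built above, the loop below pushes two more: copies of a
     neutral-element vector when NEUTRAL_OP is available, or repeats of the
     vectors already created otherwise.  (Illustrative counts only.)  */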
  while (number_of_vectors > vec_oprnds->length ())
    {
      tree neutral_vec = NULL;

      if (neutral_op)
	{
	  if (!neutral_vec)
	    neutral_vec = build_vector_from_val (vector_type, neutral_op);

	  vec_oprnds->quick_push (neutral_vec);
	}
      else
	{
	  for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
	    vec_oprnds->quick_push (vop);
	}
    }
}

/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
{
  tree vec_oprnd;
  gimple vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());

  FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      vec_oprnds->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the corresponding child of SLP_NODE, and we call
   vect_get_slp_vect_defs () to retrieve them.  */

void
vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
		   vec<vec<tree> > *vec_oprnds, int reduc_index)
{
  gimple first_stmt;
  int number_of_vects = 0, i;
  unsigned int child_index = 0;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
  slp_tree child = NULL;
  vec<tree> vec_defs;
  tree oprnd;
  bool vectorized_defs;

  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
  FOR_EACH_VEC_ELT (ops, i, oprnd)
    {
      /* For each operand we check if it has vectorized definitions in a child
	 node or we need to create them (for invariants and constants).  We
	 check if the LHS of the first stmt of the next child matches OPRND.
	 If it does, we found the correct child.  Otherwise, we call
	 vect_get_constant_vectors (), and do not advance CHILD_INDEX in order
	 to check this child node for the next operand.  */
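      /* A small illustration (hypothetical statements): for a group of
	 a[i] = b[i] + x stmts, the operand b[i] matches the LHS of the first
	 statement of the load child node and its vectorized defs are reused,
	 whereas the invariant x has no child and is handled by
	 vect_get_constant_vectors ().  */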
      vectorized_defs = false;
      if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
	{
	  child = SLP_TREE_CHILDREN (slp_node)[child_index];

	  /* We have to check both pattern and original def, if available.  */
	  gimple first_def = SLP_TREE_SCALAR_STMTS (child)[0];
	  gimple related
	    = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));

	  if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
	      || (related
		  && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
	    {
	      /* The number of vector defs is determined by the number of
		 vector statements in the node from which we get those
		 statements.  */
	      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
	      vectorized_defs = true;
	      child_index++;
	    }
	}

      if (!vectorized_defs)
	{
	  if (i == 0)
	    {
	      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	      /* Number of vector stmts was calculated according to LHS in
		 vect_schedule_slp_instance (), fix it by replacing LHS with
		 RHS, if necessary.  See vect_get_smallest_scalar_type () for
		 details.  */
	      vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
					     &rhs_size_unit);
	      if (rhs_size_unit != lhs_size_unit)
		{
		  number_of_vects *= rhs_size_unit;
		  number_of_vects /= lhs_size_unit;
		}
	    }
	}

      /* Allocate memory for vectorized defs.  */
      vec_defs = vNULL;
      vec_defs.create (number_of_vects);

      /* For reduction defs we call vect_get_constant_vectors (), since we are
	 looking for initial loop invariant values.  */
      if (vectorized_defs && reduc_index == -1)
	/* The defs are already vectorized.  */
	vect_get_slp_vect_defs (child, &vec_defs);
      else
	/* Build vectors from scalar defs.  */
	vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
				   number_of_vects, reduc_index);

      vec_oprnds->quick_push (vec_defs);

      /* For reductions, we only need initial values.  */
      if (reduc_index != -1)
	return;
    }
}

/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
   building a vector of type MASK_TYPE from it) and two input vectors placed in
   DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
   shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
   the created stmts must be inserted.  */
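/* For instance, with 4 vector statements for NODE and NCOPIES == 2,
   STRIDE == 2: the first copy permutes DR_CHAIN[FIRST_VEC_INDX] and
   DR_CHAIN[SECOND_VEC_INDX], the second copy the elements two positions
   further down DR_CHAIN.  (Hypothetical counts, for illustration.)  */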
static void
vect_create_mask_and_perm (gimple stmt,
			   tree mask, int first_vec_indx, int second_vec_indx,
			   gimple_stmt_iterator *gsi, slp_tree node,
			   tree vectype, vec<tree> dr_chain,
			   int ncopies, int vect_stmts_counter)
{
  tree perm_dest;
  gimple perm_stmt = NULL;
  int i, stride;
  tree first_vec, second_vec, data_ref;

  stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;

  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
  for (i = SLP_TREE_VEC_STMTS (node).length ();
       i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
    SLP_TREE_VEC_STMTS (node).quick_push (NULL);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
  for (i = 0; i < ncopies; i++)
    {
      first_vec = dr_chain[first_vec_indx];
      second_vec = dr_chain[second_vec_indx];

      /* Generate the permute statement.  */
      perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
				       first_vec, second_vec, mask);
      data_ref = make_ssa_name (perm_dest, perm_stmt);
      gimple_set_lhs (perm_stmt, data_ref);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);

      /* Store the vector statement in NODE.  */
      SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;

      first_vec_indx += stride;
      second_vec_indx += stride;
    }
}

/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
   return in CURRENT_MASK_ELEMENT its equivalent in target specific
   representation.  Check that the mask is valid and return FALSE if not.
   Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
   the next vector, i.e., the current first vector is not needed.  */

static bool
vect_get_mask_element (gimple stmt, int first_mask_element, int m,
		       int mask_nunits, bool only_one_vec, int index,
		       unsigned char *mask, int *current_mask_element,
		       bool *need_next_vector, int *number_of_mask_fixes,
		       bool *mask_fixed, bool *needs_first_vector)
{
  int i;

  /* Convert to target specific representation.  */
  *current_mask_element = first_mask_element + m;
  /* Adjust the value in case it's a mask for second and third vectors.  */
  *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);

  if (*current_mask_element < 0)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "permutation requires past vector ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  if (*current_mask_element < mask_nunits)
    *needs_first_vector = true;

  /* We have only one input vector to permute but the mask accesses values in
     the next vector as well.  */
  if (only_one_vec && *current_mask_element >= mask_nunits)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "permutation requires at least two vectors ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* The mask requires the next vector.  */
  while (*current_mask_element >= mask_nunits * 2)
    {
      if (*needs_first_vector || *mask_fixed)
	{
	  /* We either need the first vector too or have already moved to the
	     next vector.  In both cases, this permutation needs three
	     vectors.  */
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "permutation requires at "
			       "least three vectors ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }

	  return false;
	}

      /* We move to the next vector, dropping the first one and working with
	 the second and the third - we need to adjust the values of the mask
	 accordingly.  */
      *current_mask_element -= mask_nunits * *number_of_mask_fixes;

      for (i = 0; i < index; i++)
	mask[i] -= mask_nunits * *number_of_mask_fixes;

      (*number_of_mask_fixes)++;
      *mask_fixed = true;
    }

  *need_next_vector = *mask_fixed;

  /* This was the last element of this mask.  Start a new one.  */
  if (index == mask_nunits - 1)
    {
      *number_of_mask_fixes = 1;
      *mask_fixed = false;
      *needs_first_vector = false;
    }

  return true;
}

/* Generate vector permute statements from a list of loads in DR_CHAIN.
   If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
   permute statements for the SLP node NODE of the SLP instance
   SLP_NODE_INSTANCE.  */

bool
vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
			      gimple_stmt_iterator *gsi, int vf,
			      slp_instance slp_node_instance, bool analyze_only)
{
  gimple stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_element_type = NULL_TREE, mask_type;
  int i, j, k, nunits, vec_index = 0;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
  int first_mask_element;
  int index, unroll_factor, current_mask_element, ncopies;
  unsigned char *mask;
  bool only_one_vec = false, need_next_vector = false;
  int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
  int number_of_mask_fixes = 1;
  bool mask_fixed = false;
  bool needs_first_vector = false;
  machine_mode mode;

  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));

  mode = TYPE_MODE (vectype);

  if (!can_vec_perm_p (mode, false, NULL))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vect permute for ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }
  /* The generic VEC_PERM_EXPR code always uses an integral type of the
     same size as the vector element being permuted.  */
  mask_element_type = lang_hooks.types.type_for_mode
		(int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  mask = XALLOCAVEC (unsigned char, nunits);
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
     unrolling factor.  */
  orig_vec_stmts_num
    = (STMT_VINFO_GROUP_SIZE (stmt_info)
       * SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance)
       + nunits - 1) / nunits;
  if (orig_vec_stmts_num == 1)
    only_one_vec = true;

  /* Number of copies is determined by the final vectorization factor
     relatively to SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* Generate permutation masks for every NODE.  Number of masks for each NODE
     is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4.  I.e., we have a
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     ...

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
     The last mask is illegal since we assume two operands for permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */
  index = 0;
  vect_stmts_counter = 0;
  vec_index = 0;
  first_vec_index = vec_index++;
  if (only_one_vec)
    second_vec_index = first_vec_index;
  else
    second_vec_index = vec_index++;

  for (j = 0; j < unroll_factor; j++)
    {
      for (k = 0; k < group_size; k++)
	{
	  i = SLP_TREE_LOAD_PERMUTATION (node)[k];
	  first_mask_element = i + j * STMT_VINFO_GROUP_SIZE (stmt_info);
	  if (!vect_get_mask_element (stmt, first_mask_element, 0,
				      nunits, only_one_vec, index,
				      mask, &current_mask_element,
				      &need_next_vector,
				      &number_of_mask_fixes, &mask_fixed,
				      &needs_first_vector))
	    return false;
	  gcc_assert (current_mask_element >= 0
		      && current_mask_element < 2 * nunits);
	  mask[index++] = current_mask_element;

	  if (index == nunits)
	    {
	      index = 0;
	      if (!can_vec_perm_p (mode, false, mask))
		{
		  if (dump_enabled_p ())
		    {
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
				       vect_location,
				       "unsupported vect permute { ");
		      for (i = 0; i < nunits; ++i)
			dump_printf (MSG_MISSED_OPTIMIZATION, "%d ",
				     mask[i]);
		      dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
		    }
		  return false;
		}

	      if (!analyze_only)
		{
		  int l;
		  tree mask_vec, *mask_elts;
		  mask_elts = XALLOCAVEC (tree, nunits);
		  for (l = 0; l < nunits; ++l)
		    mask_elts[l] = build_int_cst (mask_element_type,
						  mask[l]);
		  mask_vec = build_vector (mask_type, mask_elts);

		  if (need_next_vector)
		    {
		      first_vec_index = second_vec_index;
		      second_vec_index = vec_index;
		    }

		  vect_create_mask_and_perm (stmt,
			      mask_vec, first_vec_index, second_vec_index,
			      gsi, node, vectype, dr_chain,
			      ncopies, vect_stmts_counter++);
		}
	    }
	}
    }

  return true;
}

/* Vectorize SLP instance tree in postorder.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
			    unsigned int vectorization_factor)
{
  gimple stmt;
  bool grouped_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i;
  slp_tree child;

  if (!node)
    return false;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_schedule_slp_instance (child, instance, vectorization_factor);

  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  Number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
     size.
     Unless this is a SLP reduction in which case the number of vector
     stmts is equal to the number of vector stmts of the children.  */
  if (GROUP_FIRST_ELEMENT (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    vec_stmts_size = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
  else
    vec_stmts_size = (vectorization_factor * group_size) / nunits;
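
  /* E.g., with GROUP_SIZE == 4, a vectorization factor of 4 and NUNITS == 8,
     VEC_STMTS_SIZE == (4 * 4) / 8 == 2 vector statements per node
     (illustrative numbers only).  */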
  if (!SLP_TREE_VEC_STMTS (node).exists ())
    {
      SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "------>vectorizing SLP node starting from: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Vectorized stmts go before the last scalar stmt which is where
     all uses are ready.  */
  si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));

  /* Mark the first element of the reduction chain as reduction to properly
     transform the node.  In the analysis phase only the last element of the
     chain is marked as reduction.  */
  if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
    {
      STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
    }
  /* Handle two-operation SLP nodes by vectorizing the group with
     both operations and then performing a merge.  */
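  /* For example (hypothetical group), the scalar statements
       x0 = a0 + b0;  x1 = a1 - b1;  x2 = a2 + b2;  x3 = a3 - b3;
     are vectorized once with PLUS_EXPR and once with MINUS_EXPR, and the two
     results are blended with a VEC_PERM_EXPR whose mask selects lanes
     {0, n+1, 2, n+3}, i.e. even lanes from the "plus" vector and odd lanes
     from the "minus" vector.  */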
  if (SLP_TREE_TWO_OPERATORS (node))
    {
      enum tree_code code0 = gimple_assign_rhs_code (stmt);
      enum tree_code ocode;
      gimple ostmt;
      unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
      bool allsame = true;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
	if (gimple_assign_rhs_code (ostmt) != code0)
	  {
	    mask[i] = 1;
	    allsame = false;
	    ocode = gimple_assign_rhs_code (ostmt);
	  }
	else
	  mask[i] = 0;
      if (!allsame)
	{
	  vec<gimple> v0;
	  vec<gimple> v1;
	  unsigned j;
	  tree tmask = NULL_TREE;
	  vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
	  v0 = SLP_TREE_VEC_STMTS (node).copy ();
	  SLP_TREE_VEC_STMTS (node).truncate (0);
	  gimple_assign_set_rhs_code (stmt, ocode);
	  vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
	  gimple_assign_set_rhs_code (stmt, code0);
	  v1 = SLP_TREE_VEC_STMTS (node).copy ();
	  SLP_TREE_VEC_STMTS (node).truncate (0);
	  tree meltype = build_nonstandard_integer_type
	      (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
	  tree mvectype = get_same_sized_vectype (meltype, vectype);
	  unsigned k = 0, l;
	  for (j = 0; j < v0.length (); ++j)
	    {
	      tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
	      for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
		{
		  if (k >= group_size)
		    k = 0;
		  melts[l] = build_int_cst
		      (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
		}
	      tmask = build_vector (mvectype, melts);

	      /* ??? Not all targets support a VEC_PERM_EXPR with a
		 constant mask that would translate to a vec_merge RTX
		 (with their vec_perm_const_ok).  We can either not
		 vectorize in that case or let veclower do its job.
		 Unfortunately that isn't too great and at least for
		 plus/minus we'd eventually like to match targets
		 vector addsub instructions.  */
	      gimple vstmt;
	      vstmt = gimple_build_assign (make_ssa_name (vectype),
					   VEC_PERM_EXPR,
					   gimple_assign_lhs (v0[j]),
					   gimple_assign_lhs (v1[j]), tmask);
	      vect_finish_stmt_generation (stmt, vstmt, &si);
	      SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
	    }
	  return false;
	}
    }

  is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
  return is_store;
}

/* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
   For loop vectorization this is done in vectorizable_call, but for SLP
   it needs to be deferred until end of vect_schedule_slp, because multiple
   SLP instances may refer to the same scalar stmt.  */

static void
vect_remove_slp_scalar_calls (slp_tree node)
{
  gimple stmt, new_stmt;
  gimple_stmt_iterator gsi;
  int i;
  slp_tree child;
  tree lhs;
  stmt_vec_info stmt_info;

  if (!node)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_remove_slp_scalar_calls (child);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
	continue;
      stmt_info = vinfo_for_stmt (stmt);
      if (stmt_info == NULL
	  || is_pattern_stmt_p (stmt_info)
	  || !PURE_SLP_STMT (stmt_info))
	continue;
      lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi = gsi_for_stmt (stmt);
      gsi_replace (&gsi, new_stmt, false);
      SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
    }
}

/* Generate vector code for all SLP instances in the loop/basic block.  */

bool
vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  unsigned int i, vf;
  bool is_store = false;

  if (loop_vinfo)
    {
      slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    {
      slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
      vf = 1;
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
					     instance, vf);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vectorizing stmts using SLP.\n");
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple store;
      unsigned int j;
      gimple_stmt_iterator gsi;

      /* Remove scalar call stmts.  Do not do this for basic-block
	 vectorization as not all uses may be vectorized.
	 ??? Why should this be necessary?  DCE should be able to
	 remove the stmts itself.
	 ??? For BB vectorization we can as well remove scalar
	 stmts starting from the SLP tree root if they have no
	 uses.  */
      if (loop_vinfo)
	vect_remove_slp_scalar_calls (root);

      for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
		  && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
	{
	  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
	    break;

	  if (is_pattern_stmt_p (vinfo_for_stmt (store)))
	    store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
	  /* Free the attached stmt_vec_info and remove the stmt.  */
	  gsi = gsi_for_stmt (store);
	  unlink_stmt_vdef (store);
	  gsi_remove (&gsi, true);
	  release_defs (store);
	  free_stmt_vec_info (store);
	}
    }

  return is_store;
}

/* Vectorize the basic block.  */

void
vect_slp_transform_bb (basic_block bb)
{
  bb_vec_info bb_vinfo = vec_info_for_bb (bb);
  gimple_stmt_iterator si;

  gcc_assert (bb_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "------>SLPing statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_NOTE, "\n");
	}

      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);

      /* Schedule all the SLP instances when the first SLP stmt is reached.  */
      if (STMT_SLP_TYPE (stmt_info))
	{
	  vect_schedule_slp (NULL, bb_vinfo);
	  break;
	}
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "BASIC BLOCK VECTORIZED\n");

  destroy_bb_vec_info (bb_vinfo);
}