/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "tree-pass.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "optabs-tree.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"
/* Extract the location of the basic block in the source code.
   Return the basic block location if one is found, and UNKNOWN_LOCATION
   otherwise.  */

source_location
find_bb_location (basic_block bb)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOCATION;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOCATION)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOCATION;
}
/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  int i;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

  free (node);
}
/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();
  free (instance);
}
/* Create an SLP node for SCALAR_STMTS.  */

static slp_tree
vect_create_new_slp_node (vec<gimple *> scalar_stmts)
{
  slp_tree node;
  gimple *stmt = scalar_stmts[0];
  unsigned int nops;

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
    {
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
        nops++;
    }
  else
    return NULL;

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;

  return node;
}
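/* For illustration: for an SLP group built from assignments such as

     a[0] = b[0] + c[0];
     a[1] = b[1] + c[1];

   the first scalar stmt is a GIMPLE_ASSIGN with gimple_num_ops == 3
   (lhs, rhs1, rhs2), so NOPS is 2 and the CHILDREN vector of the new
   node is created with room for two child nodes; a COND_EXPR
   right-hand side reserves one extra operand slot for the embedded
   comparison.  */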
/* This structure is used in creation of an SLP tree.  Each instance
   corresponds to the same operand in a group of scalar stmts in an SLP
   node.  */
typedef struct _slp_oprnd_info
{
  /* Def-stmts for the operands.  */
  vec<gimple *> def_stmts;
  /* Information about the first statement, its vector def-type, type, the
     operand itself in case it's constant, and an indication if it's a pattern
     stmt.  */
  enum vect_def_type first_dt;
  tree first_op_type;
  bool first_pattern;
  bool second_pattern;
} *slp_oprnd_info;
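/* For illustration: for the two-stmt group

     x0 = b[0] + c[0];
     x1 = b[1] + c[1];

   one _slp_oprnd_info collects DEF_STMTS for operand 0 (the defs of
   b[0] and b[1]) and another collects them for operand 1 (the defs of
   c[0] and c[1]); FIRST_DT and FIRST_OP_TYPE remember what was seen
   for the first stmt so the remaining stmts can be checked against
   it.  */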
/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)
{
  int i;
  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
    {
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);
    }

  return oprnds_info;
}
/* Free operands info.  */

static void
vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)
{
  int i;
  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
    {
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);
    }

  oprnds_info.release ();
}
/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

static int
vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)
{
  gimple *next_stmt = first_stmt;
  int result = 0;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
    return -1;

  do
    {
      if (next_stmt == stmt)
        return result;
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
      if (next_stmt)
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
    }
  while (next_stmt);

  return -1;
}
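/* For illustration: in an interleaving chain that accesses a[0], a[2]
   and a[3] (so the second element has GROUP_GAP 2 and the third has
   GROUP_GAP 1), the places returned for the three stmts are 0, 2 and
   3; a stmt that is not linked into the chain of FIRST_STMT yields
   -1.  */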
/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */

vect_get_and_check_slp_defs (vec_info *vinfo,
			     gimple *stmt, unsigned stmt_num,
			     vec<slp_oprnd_info> *oprnds_info)
  unsigned int i, number_of_oprnds;
  enum vect_def_type dt = vect_uninitialized_def;
  struct loop *loop = NULL;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;

  if (is_a <loop_vec_info> (vinfo))
    loop = LOOP_VINFO_LOOP (as_a <loop_vec_info> (vinfo));

  if (is_gimple_call (stmt))
      number_of_oprnds = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)
	  first_op_cond = true;
      commutative = commutative_tree_code (code);

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
	  if (i == 0 || i == 1)
	    oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
	    oprnd = gimple_op (stmt, first_op_idx + i - 1);
	oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, vinfo, &def_stmt, &dt))
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: can't analyze def for ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
	 from the pattern.  Check that all the stmts of the node are in the
	 same pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
	  && ((is_a <loop_vec_info> (vinfo)
	       && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	      || (is_a <bb_vec_info> (vinfo)
		  && gimple_bb (def_stmt) == as_a <bb_vec_info> (vinfo)->bb
		  && gimple_code (def_stmt) != GIMPLE_PHI))
	  && vinfo_for_stmt (def_stmt)
	  && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
	  && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))
	  if (!first && !oprnd_info->first_pattern
	      /* Allow different pattern state for the defs of the
		 first stmt in reduction chains.  */
	      && (oprnd_info->first_dt != vect_reduction_def
		  || (!second && !oprnd_info->second_pattern)))
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: some of the stmts"
				   " are in a pattern, and others are not ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

	  def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
	  dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

	  if (dt == vect_unknown_def_type)
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Unsupported pattern.\n");

	  switch (gimple_code (def_stmt))
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported defining stmt:\n");

	oprnd_info->second_pattern = pattern;

	  oprnd_info->first_dt = dt;
	  oprnd_info->first_pattern = pattern;
	  oprnd_info->first_op_type = TREE_TYPE (oprnd);

	  /* Not first stmt of the group, check that the def-stmt/s match
	     the def-stmt/s of the first stmt.  Allow different definition
	     types for reduction chains: the first stmt must be a
	     vect_reduction_def (a phi node), and the rest
	     vect_internal_def.  */
	  if (((oprnd_info->first_dt != dt
		&& !(oprnd_info->first_dt == vect_reduction_def
		     && dt == vect_internal_def)
		&& !((oprnd_info->first_dt == vect_external_def
		      || oprnd_info->first_dt == vect_constant_def)
		     && (dt == vect_external_def
			 || dt == vect_constant_def)))
	       || !types_compatible_p (oprnd_info->first_op_type,
	      /* Try swapping operands if we got a mismatch.  */
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Build SLP failed: different types\n");

      /* Check the types of the definitions.  */
	case vect_constant_def:
	case vect_external_def:
	case vect_reduction_def:
	case vect_internal_def:
	  oprnd_info->def_stmts.quick_push (def_stmt);

	  /* FORNOW: Not supported.  */
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: illegal type of def ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

	  tree cond = gimple_assign_rhs1 (stmt);
	  swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
			     &TREE_OPERAND (cond, 1));
	  TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));

	swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
			   gimple_assign_rhs2_ptr (stmt));
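/* For illustration: with the commutative group

     x0 = a0 + b0;
     x1 = b1 + a1;

   the defs collected for operand 0 are { a0, b1 } and do not match.
   Because PLUS_EXPR is commutative the mismatch can be repaired by
   swapping rhs1/rhs2 of the offending stmt (for a COND_EXPR the
   comparison operands are swapped and the comparison code inverted
   with swap_tree_comparison), which is what the swapping code above
   implements.  */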
/* Verify if the scalar stmts STMTS are isomorphic, require data
   permutation or are of unsupported types of operation.  Return
   true if they are, otherwise return false and indicate in *MATCHES
   which stmts are not isomorphic to the first one.  If MATCHES[0]
   is false then this indicates the comparison could not be
   carried out or the stmts will never be vectorized by SLP.  */

vect_build_slp_tree_1 (vec_info *vinfo,
		       vec<gimple *> stmts, unsigned int group_size,
		       unsigned nops, unsigned int *max_nunits,
		       unsigned int vectorization_factor, bool *matches,
  gimple *first_stmt = stmts[0], *stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  bool need_same_oprnds = false;
  tree vectype = NULL_TREE, scalar_type, first_op1 = NULL_TREE;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  gimple *first_load = NULL, *prev_first_load = NULL;
  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
      if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	  dump_printf (MSG_NOTE, "\n");

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unvectorizable statement ");
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	  /* Fatal mismatch.  */

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: not GIMPLE_ASSIGN nor "
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	  /* Fatal mismatch.  */

      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == COND_EXPR
	  && (cond = gimple_assign_rhs1 (stmt))
	  && !COMPARISON_CLASS_P (cond))
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: condition is not "
	      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	  /* Fatal mismatch.  */

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Build SLP failed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	  /* Fatal mismatch.  */

      /* If populating the vector type requires unrolling then fail
	 before adjusting *max_nunits for basic-block vectorization.  */
      if (is_a <bb_vec_info> (vinfo)
	  && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Build SLP failed: unrolling required "
			   "in basic block SLP\n");
	  /* Fatal mismatch.  */

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
	  *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
	  if (is_a <bb_vec_info> (vinfo))
	    vectorization_factor = *max_nunits;

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
	  rhs_code = CALL_EXPR;
	  if (gimple_call_internal_p (call_stmt)
	      || gimple_call_tail_p (call_stmt)
	      || gimple_call_noreturn_p (call_stmt)
	      || !gimple_call_nothrow_p (call_stmt)
	      || gimple_call_chain (call_stmt))
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported call type ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	      /* Fatal mismatch.  */
	rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
	  first_stmt_code = rhs_code;

	  /* Shift arguments should be equal in all the packed stmts for a
	     vector shift with scalar shift operand.  */
	  if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
	      || rhs_code == LROTATE_EXPR
	      || rhs_code == RROTATE_EXPR)
	      vec_mode = TYPE_MODE (vectype);

	      /* First see if we have a vector/vector shift.  */
	      optab = optab_for_tree_code (rhs_code, vectype,
		  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
		  /* No vector/vector shift, try for a vector/scalar shift.  */
		  optab = optab_for_tree_code (rhs_code, vectype,
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "Build SLP failed: no optab.\n");
		      /* Fatal mismatch.  */
		  icode = (int) optab_handler (optab, vec_mode);
		  if (icode == CODE_FOR_nothing)
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
					 "op not supported by target.\n");
		      /* Fatal mismatch.  */
		  optab_op2_mode = insn_data[icode].operand[2].mode;
		  if (!VECTOR_MODE_P (optab_op2_mode))
		      need_same_oprnds = true;
		      first_op1 = gimple_assign_rhs2 (stmt);
	  else if (rhs_code == WIDEN_LSHIFT_EXPR)
	      need_same_oprnds = true;
	      first_op1 = gimple_assign_rhs2 (stmt);

	  if (first_stmt_code != rhs_code
	      && alt_stmt_code == ERROR_MARK)
	    alt_stmt_code = rhs_code;
	  if (first_stmt_code != rhs_code
	      && (first_stmt_code != IMAGPART_EXPR
		  || rhs_code != REALPART_EXPR)
	      && (first_stmt_code != REALPART_EXPR
		  || rhs_code != IMAGPART_EXPR)
	      /* Handle mismatches in plus/minus by computing both
		 and merging the results.  */
	      && !((first_stmt_code == PLUS_EXPR
		    || first_stmt_code == MINUS_EXPR)
		   && (alt_stmt_code == PLUS_EXPR
		       || alt_stmt_code == MINUS_EXPR)
		   && rhs_code == alt_stmt_code)
	      && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
		   && (first_stmt_code == ARRAY_REF
		       || first_stmt_code == BIT_FIELD_REF
		       || first_stmt_code == INDIRECT_REF
		       || first_stmt_code == COMPONENT_REF
		       || first_stmt_code == MEM_REF)))
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different operation "
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,

	      && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: different shift "
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	  if (rhs_code == CALL_EXPR)
	      gimple *first_stmt = stmts[0];
	      if (gimple_call_num_args (stmt) != nops
		  || !operand_equal_p (gimple_call_fn (first_stmt),
				       gimple_call_fn (stmt), 0)
		  || gimple_call_fntype (first_stmt)
		     != gimple_call_fntype (stmt))
		  if (dump_enabled_p ())
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different calls in ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
	  if (REFERENCE_CLASS_P (lhs))
	      /* Check that the size of interleaved loads group is not
		 greater than the SLP group size.  */
		= vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
	      if (is_a <loop_vec_info> (vinfo)
		  && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt
		  && ((GROUP_SIZE (vinfo_for_stmt (stmt))
		       - GROUP_GAP (vinfo_for_stmt (stmt)))
		      > ncopies * group_size))
		  if (dump_enabled_p ())
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: the number "
				       "of interleaved loads is greater than "
				       "the SLP group size ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		  /* Fatal mismatch.  */

	      first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
		  /* Check that there are no loads from different interleaving
		     chains in the same node.  */
		  if (prev_first_load != first_load)
		      if (dump_enabled_p ())
			  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					   "Build SLP failed: different "
					   "interleaving chains in one node ");
			  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		prev_first_load = first_load;
	} /* Grouped access.  */
	  if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
	      /* Not grouped load.  */
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: not grouped load ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	      /* FORNOW: Not grouped loads are not supported.  */
	      /* Fatal mismatch.  */

	  /* Not memory operation.  */
	  if (TREE_CODE_CLASS (rhs_code) != tcc_binary
	      && TREE_CODE_CLASS (rhs_code) != tcc_unary
	      && TREE_CODE_CLASS (rhs_code) != tcc_expression
	      && rhs_code != CALL_EXPR)
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: operation");
		  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	      /* Fatal mismatch.  */

	  if (rhs_code == COND_EXPR)
	      tree cond_expr = gimple_assign_rhs1 (stmt);
		first_cond_code = TREE_CODE (cond_expr);
	      else if (first_cond_code != TREE_CODE (cond_expr))
		  if (dump_enabled_p ())
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				       "Build SLP failed: different"
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  for (i = 0; i < group_size; ++i)

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
	= XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
	  if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
	    sel[i] += TYPE_VECTOR_SUBPARTS (vectype);
      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
	  for (i = 0; i < group_size; ++i)
	    if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
		if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "Build SLP failed: different operation "
		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
		    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
      *two_operators = true;
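/* For illustration: a four-lane group mixing the two operations

     { PLUS_EXPR, MINUS_EXPR, PLUS_EXPR, MINUS_EXPR }

   is handled as a two-operator node: both a vector add and a vector
   subtract are generated and blended with a permutation.  With four
   subparts the mask built above is sel = { 0, 5, 2, 7 }, i.e. even
   lanes from the first result and odd lanes from the second, and
   can_vec_perm_p must confirm the target supports that permute.  */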
/* Recursively build an SLP tree starting from NODE.
   Fail (and return a value not equal to zero) if def-stmts are not
   isomorphic, require data permutation or are of unsupported types of
   operation.  Otherwise, return 0.
   The value returned is the depth in the SLP tree where a mismatch
   was found.  */

vect_build_slp_tree (vec_info *vinfo,
		     slp_tree *node, unsigned int group_size,
		     unsigned int *max_nunits,
		     vec<slp_tree> *loads,
		     unsigned int vectorization_factor,
		     bool *matches, unsigned *npermutes, unsigned *tree_size,
		     unsigned max_tree_size)
  unsigned nops, i, this_tree_size = 0;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (vinfo,
			      SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
			      max_nunits, vectorization_factor, matches,
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
      loads->safe_push (*node);

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
      switch (vect_get_and_check_slp_defs (vinfo, stmt, i, &oprnds_info))
	  vect_free_oprnd_info (oprnds_info);
  for (i = 0; i < group_size; ++i)
	vect_free_oprnd_info (oprnds_info);

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)

      if (++this_tree_size > max_tree_size)
	  vect_free_oprnd_info (oprnds_info);

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
	  vect_free_oprnd_info (oprnds_info);

      if (vect_build_slp_tree (vinfo, &child,
			       group_size, max_nunits, loads,
			       vectorization_factor, matches,
			       npermutes, &this_tree_size, max_tree_size))
	  /* If we have all children of child built up from scalars then just
	     throw that away and build it up this node from scalars.  */
	  if (!SLP_TREE_CHILDREN (child).is_empty ())
	      slp_tree grandchild;

	      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		if (grandchild != NULL)
		  *max_nunits = old_max_nunits;
		  loads->truncate (old_nloads);
		  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
		    vect_free_slp_tree (grandchild);
		  SLP_TREE_CHILDREN (child).truncate (0);

		  dump_printf_loc (MSG_NOTE, vect_location,
				   "Building parent vector operands from "
				   "scalars instead\n");
		  oprnd_info->def_stmts = vNULL;
		  vect_free_slp_tree (child);
		  SLP_TREE_CHILDREN (*node).quick_push (NULL);

	  oprnd_info->def_stmts = vNULL;
	  SLP_TREE_CHILDREN (*node).quick_push (child);
      /* If the SLP build failed fatally and we analyze a basic-block
	 simply treat nodes we fail to build as externally defined
	 (and thus build vectors from the scalar defs).
	 The cost model will reject outright expensive cases.
	 ??? This doesn't treat cases where permutation ultimately
	 fails (or we don't try permutation below).  Ideally we'd
	 even compute a permutation that will end up with the maximum
	 SLP tree size.  */
      if (is_a <bb_vec_info> (vinfo)
	  /* ??? Rejecting patterns this way doesn't work.  We'd have to
	     do extra work to cancel the pattern so the uses see the
	     scalar version.  */
	  && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))
	  slp_tree grandchild;

	  *max_nunits = old_max_nunits;
	  loads->truncate (old_nloads);
	  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
	    vect_free_slp_tree (grandchild);
	  SLP_TREE_CHILDREN (child).truncate (0);

	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Building vector operands from scalars\n");
	  oprnd_info->def_stmts = vNULL;
	  vect_free_slp_tree (child);
	  SLP_TREE_CHILDREN (*node).quick_push (NULL);

      /* If the SLP build for operand zero failed and operand zero
	 and one can be commutated try that for the scalar stmts
	 that failed the match.  */
	  /* A first scalar stmt mismatch signals a fatal mismatch.  */
	  /* ??? For COND_EXPRs we can swap the comparison operands
	     as well as the arms under some constraints.  */
	  && oprnds_info[1]->first_dt == vect_internal_def
	  && is_gimple_assign (stmt)
	  && commutative_tree_code (gimple_assign_rhs_code (stmt))
	  && !SLP_TREE_TWO_OPERATORS (*node)
	  /* Do so only if the number of not successful permutes was not more
	     than a cut-off as re-trying the recursive match on
	     possibly each level of the tree would expose exponential
	     behavior.  */
	  slp_tree grandchild;

	  *max_nunits = old_max_nunits;
	  loads->truncate (old_nloads);
	  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
	    vect_free_slp_tree (grandchild);
	  SLP_TREE_CHILDREN (child).truncate (0);

	  /* Swap mismatched definition stmts.  */
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Re-trying with swapped operands of stmts ");
	  for (j = 0; j < group_size; ++j)
	      std::swap (oprnds_info[0]->def_stmts[j],
			 oprnds_info[1]->def_stmts[j]);
	      dump_printf (MSG_NOTE, "%d ", j);
	  dump_printf (MSG_NOTE, "\n");
	  /* And try again with scratch 'matches' ... */
	  bool *tem = XALLOCAVEC (bool, group_size);
	  if (vect_build_slp_tree (vinfo, &child,
				   group_size, max_nunits, loads,
				   vectorization_factor,
				   tem, npermutes, &this_tree_size,
	      /* ... so if successful we can apply the operand swapping
		 to the GIMPLE IL.  This is necessary because for example
		 vect_get_slp_defs uses operand indexes and thus expects
		 canonical operand order.  */
	      for (j = 0; j < group_size; ++j)
		  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
		  swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
				     gimple_assign_rhs2_ptr (stmt));
	      oprnd_info->def_stmts = vNULL;
	      SLP_TREE_CHILDREN (*node).quick_push (child);

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);

    *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
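/* For illustration: when the defs of one operand are not isomorphic,
   basic-block SLP does not necessarily discard the whole tree; the
   failing child is recorded as NULL above and its vector operand is
   later built from the scalar defs, leaving it to the cost model to
   reject the instance if that turns out to be too expensive.  */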
/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

vect_print_slp_tree (int dump_kind, slp_tree node)
  dump_printf (dump_kind, "node ");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
      dump_printf (dump_kind, "\n\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
  dump_printf (dump_kind, "\n");

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, child);
/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);
/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

vect_mark_slp_stmts_relevant (slp_tree node)
  stmt_vec_info stmt_info;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
		  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);
/* Rearrange the statements of NODE according to PERMUTATION.  */

vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
			  vec<unsigned> permutation)
  vec<gimple *> tmp_stmts;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
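/* For illustration: with GROUP_SIZE 4, scalar stmts { s0, s1, s2, s3 }
   and PERMUTATION { 2, 0, 3, 1 }, each stmt I is stored at position
   PERMUTATION[I], so TMP_STMTS becomes { s1, s3, s0, s2 }.  */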
/* Attempt to reorder stmts in a reduction chain so that we don't
   require any load permutation.  Return true if that was possible,
   otherwise return false.  */

vect_attempt_slp_rearrange_stmts (slp_instance slp_instn)
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  slp_tree node, load;

  /* Compare all the permutation sequences to the first one.  We know
     that at least one load is permuted.  */
  node = SLP_INSTANCE_LOADS (slp_instn)[0];
  if (!node->load_permutation.exists ())
  for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
      if (!load->load_permutation.exists ())
      FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
	if (lidx != node->load_permutation[j])

  /* Check that the loads in the first sequence are different and there
     are no gaps between them.  */
  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
      if (bitmap_bit_p (load_index, lidx))
	  sbitmap_free (load_index);
      bitmap_set_bit (load_index, lidx);
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
	sbitmap_free (load_index);
  sbitmap_free (load_index);

  /* This permutation is valid for reduction.  Since the order of the
     statements in the nodes is not important unless they are memory
     accesses, we can rearrange the statements in all the nodes
     according to the order of the loads.  */
  vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
			    node->load_permutation);

  /* We are done, no actual permutations need to be generated.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    SLP_TREE_LOAD_PERMUTATION (node).release ();
/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

vect_supported_load_permutation_p (slp_instance slp_instn)
  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  gimple *stmt, *load, *next_load, *first_load;
  struct data_reference *dr;

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	if (node->load_permutation.exists ())
	  FOR_EACH_VEC_ELT (node->load_permutation, j, next)
	    dump_printf (MSG_NOTE, "%d ", next);
	  for (k = 0; k < group_size; ++k)
	    dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ??? Can't we assert this? */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is not important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
      if (vect_attempt_slp_rearrange_stmts (slp_instn))
      /* Fallthru to general load permutation handling.  */

  /* In basic block vectorization we allow any subchain of an interleaving
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
      /* Check whether the loads in an instance form a subchain and thus
	 no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	  if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
	  bool subchain_p = true;
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
		&& (next_load != load
		    || GROUP_GAP (vinfo_for_stmt (load)) != 1))
	      next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
	    SLP_TREE_LOAD_PERMUTATION (node).release ();
	      /* Verify the permutation can be generated.  */
	      if (!vect_transform_slp_perm_load (node, tem, NULL,
						 1, slp_instn, true))
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
				   "unsupported load permutation\n");

      /* Check that the alignment of the first load in every subchain, i.e.,
	 the first statement in every load node, is supported.
	 ??? This belongs in alignment checking.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
	  first_load = SLP_TREE_SCALAR_STMTS (node)[0];
	  if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
	      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
	      if (vect_supportable_dr_alignment (dr, false)
		  == dr_unaligned_unsupported)
		  if (dump_enabled_p ())
		      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
				       "unsupported unaligned load ");
		      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
		      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  /* For loop vectorization verify we can generate the permutation.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
	&& !vect_transform_slp_perm_load
	      SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
1465 vect_find_last_scalar_stmt_in_slp (slp_tree node
)
1467 gimple
*last
= NULL
, *stmt
;
1469 for (int i
= 0; SLP_TREE_SCALAR_STMTS (node
).iterate (i
, &stmt
); i
++)
1471 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (stmt
);
1472 if (is_pattern_stmt_p (stmt_vinfo
))
1473 last
= get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo
), last
);
1475 last
= get_later_stmt (stmt
, last
);
/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
			 stmt_vector_for_cost *prologue_cost_vec,
			 stmt_vector_for_cost *body_cost_vec,
			 unsigned ncopies_for_cost)
  stmt_vec_info stmt_info;
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
      vect_analyze_slp_cost_1 (instance, child, prologue_cost_vec,
			       body_cost_vec, ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
	vect_model_store_cost (stmt_info, ncopies_for_cost, false,
			       vect_uninitialized_def,
			       node, prologue_cost_vec, body_cost_vec);
	  gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
	  vect_model_load_cost (stmt_info, ncopies_for_cost, false,
				node, prologue_cost_vec, body_cost_vec);
	  /* If the load is permuted record the cost for the permutation.
	     ??? Loads from multiple chains are let through here only
	     for a single special case involving complex numbers where
	     in the end no permutation is necessary.  */
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, s)
	    if ((STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo_for_stmt (s))
		 == STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info))
		&& vect_get_place_in_interleaving_chain
		     (s, STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info)) != i)
		record_stmt_cost (body_cost_vec, group_size, vec_perm,
				  stmt_info, 0, vect_body);

      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
			stmt_info, 0, vect_body);
      if (SLP_TREE_TWO_OPERATORS (node))
	  record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
			    stmt_info, 0, vect_body);
	  record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
			    stmt_info, 0, vect_body);

  /* Scan operands and account for prologue cost of constants/externals.
     ??? This over-estimates cost for multiple uses and should be
     re-engineered.  */
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
      tree op = gimple_op (stmt, i);
      enum vect_def_type dt;
      if (!op || op == lhs)
      if (vect_is_simple_use (op, stmt_info->vinfo, &def_stmt, &dt))
	  /* Without looking at the actual initializer a vector of
	     constants can be implemented as load from the constant pool.
	     ??? We need to pass down stmt_info for a vector type
	     even if it points to the wrong stmt.  */
	  if (dt == vect_constant_def)
	    record_stmt_cost (prologue_cost_vec, 1, vector_load,
			      stmt_info, 0, vect_prologue);
	  else if (dt == vect_external_def)
	    record_stmt_cost (prologue_cost_vec, 1, vec_construct,
			      stmt_info, 0, vect_prologue);
/* Compute the cost for the SLP instance INSTANCE.  */

vect_analyze_slp_cost (slp_instance instance, void *data)
  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_analyze_slp_cost ===\n");

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  slp_tree node = SLP_INSTANCE_TREE (instance);
  stmt_vec_info stmt_info = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
  /* Adjust the group_size by the vectorization factor which is always one
     for basic-block vectorization.  */
  if (STMT_VINFO_LOOP_VINFO (stmt_info))
    group_size *= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  /* For reductions look at a reduction operand in case the reduction
     operation is widening like DOT_PROD or SAD.  */
  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      switch (gimple_assign_rhs_code (stmt))
	  nunits = TYPE_VECTOR_SUBPARTS (get_vectype_for_scalar_type
					   (TREE_TYPE (gimple_assign_rhs1 (stmt))));
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  vect_analyze_slp_cost_1 (instance, SLP_INSTANCE_TREE (instance),
			   &prologue_cost_vec, &body_cost_vec,

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  */
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
			    si->misalign, vect_prologue);

  /* Record the instance's instructions in the target cost model.  */
  FOR_EACH_VEC_ELT (body_cost_vec, i, si)
      struct _stmt_vec_info *stmt_info
	= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
			    si->misalign, vect_body);

  prologue_cost_vec.release ();
  body_cost_vec.release ();
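/* For illustration: a store group of four stmts in a loop with
   vectorization factor 2 and a vector type of four subparts gives an
   adjusted GROUP_SIZE of 8 and
   NCOPIES_FOR_COST = least_common_multiple (4, 8) / 4 = 2, so two
   vector stmts per node are fed into the cost model.  */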
/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

vect_analyze_slp_instance (vec_info *vinfo,
			   gimple *stmt, unsigned max_tree_size)
  slp_instance new_instance;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  unsigned int vectorization_factor = 0;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple *> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
	  scalar_type = TREE_TYPE (DR_REF (dr));
	  vectype = get_vectype_for_scalar_type (scalar_type);
	  gcc_assert (is_a <loop_vec_info> (vinfo));
	  vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
      gcc_assert (is_a <loop_vec_info> (vinfo));
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = as_a <loop_vec_info> (vinfo)->reductions.length ();

      if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "Build SLP failed: unsupported data-type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (is_a <loop_vec_info> (vinfo))
    vectorization_factor = as_a <loop_vec_info> (vinfo)->vectorization_factor;
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
  if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Build SLP failed: unrolling required in basic"

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
	  if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
	      && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
	    scalar_stmts.safe_push (
		  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
	    scalar_stmts.safe_push (next);
	  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
      /* Mark the first element of the reduction chain as reduction to properly
	 transform the node.  In the reduction analysis phase only the last
	 element of the chain is marked as reduction.  */
      if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;

      /* Collect reduction statements.  */
      vec<gimple *> reductions = as_a <loop_vec_info> (vinfo)->reductions;
      for (i = 0; reductions.iterate (i, &next); i++)
	scalar_stmts.safe_push (next);

  node = vect_create_new_slp_node (scalar_stmts);
  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (vinfo, &node, group_size,
			   &max_nunits, &loads,
			   vectorization_factor, matches, &npermutes, NULL,
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
	unrolling_factor = least_common_multiple (max_nunits, group_size)

      if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Build SLP failed: unrolling required in basic"
	  vect_free_slp_tree (node);

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
	  vec<unsigned> load_permutation;
	  gimple *load, *first_stmt;
	  bool this_load_permuted = false;
	  load_permutation.create (group_size);
	  first_stmt = GROUP_FIRST_ELEMENT
	      (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
		= vect_get_place_in_interleaving_chain (load, first_stmt);
	      gcc_assert (load_place != -1);
	      if (load_place != j)
		this_load_permuted = true;
	      load_permutation.safe_push (load_place);
	  if (!this_load_permuted
	      /* The load requires permutation when unrolling exposes
		 a gap either because the group is larger than the SLP
		 group-size or because there is a gap between the groups.  */
	      && (unrolling_factor == 1
		  || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
		      && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
	      load_permutation.release ();
	  SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
	  loads_permuted = true;

	  if (!vect_supported_load_permutation_p (new_instance))
	      if (dump_enabled_p ())
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "Build SLP failed: unsupported load "
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	      vect_free_slp_instance (new_instance);

      vinfo->slp_instances.safe_push (new_instance);

      if (dump_enabled_p ())
	vect_print_slp_tree (MSG_NOTE, node);

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
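/* For illustration: a store group of three stmts with a four-subpart
   vector type gives UNROLLING_FACTOR = least_common_multiple (4, 3) / 3
   = 4; such an instance is rejected for basic-block SLP, where no
   unrolling is possible, but can still be used inside a loop.  */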
/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)
  gimple *first_element;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (vinfo, first_element, max_tree_size))

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
      if (loop_vinfo->reduction_chains.length () > 0)
	  /* Find SLP sequences starting from reduction chains.  */
	  FOR_EACH_VEC_ELT (loop_vinfo->reduction_chains, i, first_element)
	    if (vect_analyze_slp_instance (vinfo, first_element,
	      /* Don't try to vectorize SLP reductions if reduction chain was
		 detected.  */

      /* Find SLP sequences starting from groups of reductions.  */
      if (loop_vinfo->reductions.length () > 1
	  && vect_analyze_slp_instance (vinfo, loop_vinfo->reductions[0],
/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

vect_make_slp_decision (loop_vec_info loop_vinfo)
  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
	unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
	 call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
	 loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "Decided to SLP %d instances.  Unrolling factor %d\n",
		     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);
/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)
  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[i];
  imm_use_iterator imm_iter;
  stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Propagate hybrid down the SLP tree.  */
  if (stype == hybrid)
  else if (HYBRID_SLP_STMT (stmt_vinfo))
      /* Check if a pure SLP stmt has uses in non-SLP stmts.  */
      gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
      /* We always get the pattern stmt here, but for immediate
	 uses we have to use the LHS of the original stmt.  */
      gcc_checking_assert (!STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      if (STMT_VINFO_RELATED_STMT (stmt_vinfo))
	stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
	FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
	    if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	    use_vinfo = vinfo_for_stmt (use_stmt);
	    if (STMT_VINFO_IN_PATTERN_P (use_vinfo)
		&& STMT_VINFO_RELATED_STMT (use_vinfo))
	      use_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (use_vinfo));
	    if (!STMT_SLP_TYPE (use_vinfo)
		&& (STMT_VINFO_RELEVANT (use_vinfo)
		    || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo)))
		&& !(gimple_code (use_stmt) == GIMPLE_PHI
		     && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
		if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
				     "def in non-SLP stmt: ");
		    dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0);

      && !HYBRID_SLP_STMT (stmt_vinfo))
      if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      STMT_SLP_TYPE (stmt_vinfo) = hybrid;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
    vect_detect_hybrid_slp_stmts (child, i, stype);
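/* For illustration: if a pure-SLP stmt defines a value that is also
   consumed by a relevant non-SLP stmt in the loop (one that will be
   vectorized by the loop-based scheme), the def is downgraded to
   hybrid above, so it ends up being handled both as part of the SLP
   instance and by loop-based vectorization.  */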
/* Helpers for vect_detect_hybrid_slp walking pattern stmt uses.  */

vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)
  walk_stmt_info *wi = (walk_stmt_info *) data;
  struct loop *loopp = (struct loop *) wi->info;

  if (TREE_CODE (*tp) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (*tp))
      gimple *def_stmt = SSA_NAME_DEF_STMT (*tp);
      if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
	  && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
	  if (dump_enabled_p ())
	      dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	  STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;
vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,
  /* If the stmt is in a SLP instance then this isn't a reason
     to mark use definitions in other SLP instances as hybrid.  */
  if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)
/* Find stmts that must be both vectorized and SLPed.  */

vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="

  /* First walk all pattern stmts in the loop and mark defs of uses as
     hybrid because immediate uses in them are not recorded.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
	  gimple *stmt = gsi_stmt (gsi);
	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
	  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
	      memset (&wi, 0, sizeof (wi));
	      wi.info = LOOP_VINFO_LOOP (loop_vinfo);
	      gimple_stmt_iterator gsi2
		= gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
	      walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
				vect_detect_hybrid_slp_1, &wi);
	      walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
			       vect_detect_hybrid_slp_2,
			       vect_detect_hybrid_slp_1, &wi);

  /* Then walk the SLP instance trees marking stmts with uses in
     non-SLP stmts as hybrid, also propagating hybrid down the
     SLP tree, collecting the above info on-the-fly.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
      for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
	vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

new_bb_vec_info (basic_block bb)
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  res->kind = vec_info::bb;
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      gimple *stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);
/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

destroy_bb_vec_info (bb_vec_info bb_vinfo)
  vec<slp_instance> slp_instances;
  slp_instance instance;
  gimple_stmt_iterator si;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
      gimple *stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      /* Free stmt_vec_info.  */
      free_stmt_vec_info (stmt);

  vect_destroy_datarefs (bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));
/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

vect_slp_analyze_node_operations (slp_tree node)
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (child))

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);

      if (!vect_analyze_stmt (stmt, &dummy, node))
/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

vect_slp_analyze_operations (vec<slp_instance> slp_instances, void *data)
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_slp_analyze_operations ===\n");

  for (i = 0; slp_instances.iterate (i, &instance); )
      if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
          dump_printf_loc (MSG_NOTE, vect_location,
                           "removing SLP instance operations starting from: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                            SLP_TREE_SCALAR_STMTS
                              (SLP_INSTANCE_TREE (instance))[0], 0);
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);

      /* Compute the costs of the SLP instance.  */
      vect_analyze_slp_cost (instance, data);

  if (!slp_instances.length ())
/* Compute the scalar cost of the SLP node NODE and its children
   and return it.  Do not account defs that are marked in LIFE and
   update LIFE according to uses of NODE.  */
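/* Illustrative sketch (not from the original comment): for an SLP group

     x_1 = b[0] + c[0];
     x_2 = b[1] + c[1];
     a[0] = x_1;
     a[1] = x_2;
     ... = x_1;        (some non-vectorizable use of x_1 elsewhere)

   the stmt defining x_1 has to stay live after vectorization, so its scalar
   cost is not added to the scalar total computed below; the vector version
   then has to beat a smaller scalar cost, i.e. the comparison is made
   conservative.  */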
vect_bb_slp_scalar_cost (basic_block bb,
                         slp_tree node, vec<bool, va_heap> *life)
  unsigned scalar_cost = 0;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
      ssa_op_iter op_iter;
      def_operand_p def_p;
      stmt_vec_info stmt_info;

      /* If there is a non-vectorized use of the defs then the scalar
         stmt is kept live in which case we do not account it or any
         required defs in the SLP children in the scalar cost.  This
         way we make the vectorization more costly when compared to
         the scalar cost.  */
      FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
          imm_use_iterator use_iter;
          FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
            if (!is_gimple_debug (use_stmt)
                && (gimple_code (use_stmt) == GIMPLE_PHI
                    || gimple_bb (use_stmt) != bb
                    || !STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (use_stmt))))
                BREAK_FROM_IMM_USE_STMT (use_iter);

      stmt_info = vinfo_for_stmt (stmt);
      if (STMT_VINFO_DATA_REF (stmt_info))
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = vect_get_stmt_cost (scalar_load);
          else
            stmt_cost = vect_get_stmt_cost (scalar_store);
      else
        stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
      scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);
/* Check if vectorization of the basic block is profitable.  */

vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
  unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;

  /* Calculate scalar cost.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
      auto_vec<bool, 20> life;
      life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
      scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
                                              SLP_INSTANCE_TREE (instance),
                                              &life);

  /* Complete the target-specific cost calculation.  */
  finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, " Vector inside of basic block cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
      dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
      dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
  /* Vectorization is profitable if its cost is less than the cost of scalar
     version.  */
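  /* A worked example with made-up numbers: if the SLP instances add up to a
     scalar cost of 8 and the target reports vec_inside_cost = 4 with
     vec_prologue_cost + vec_epilogue_cost = 2, then 4 + 2 = 6 < 8 and the
     block is considered profitable; with vec_outside_cost = 4 the totals tie
     at 8 and the check below rejects it.  */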
  if (vec_outside_cost + vec_inside_cost >= scalar_cost)
/* Check if the basic block can be vectorized.  */
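/* Illustrative example (not from the original sources) of a basic block the
   analysis below is meant to accept: a straight-line group of isomorphic
   stmts on adjacent memory, e.g.

     a[0] = b[0] + 1;
     a[1] = b[1] + 1;
     a[2] = b[2] + 1;
     a[3] = b[3] + 1;

   which can become a single vector load, add and store once the data-ref,
   alignment, dependence and cost checks all succeed.  */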
vect_slp_analyze_bb_1 (basic_block bb)
  bb_vec_info bb_vinfo;
  vec<slp_instance> slp_instances;
  slp_instance instance;
  unsigned n_stmts = 0;

  bb_vinfo = new_bb_vec_info (bb);

  if (!vect_analyze_data_refs (bb_vinfo, &min_vf, &n_stmts))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data-ref in basic "
                         "block.\n");
      destroy_bb_vec_info (bb_vinfo);

  if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: not enough data-refs in "
                         "basic block.\n");
      destroy_bb_vec_info (bb_vinfo);

  if (!vect_analyze_data_ref_accesses (bb_vinfo))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data access in "
                         "basic block.\n");
      destroy_bb_vec_info (bb_vinfo);

  vect_pattern_recog (bb_vinfo);

  if (!vect_analyze_data_refs_alignment (bb_vinfo))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad data alignment in basic "
                         "block.\n");
      destroy_bb_vec_info (bb_vinfo);

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (bb_vinfo, n_stmts))
      if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Failed to SLP the basic block.\n");
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: failed to find SLP opportunities "
                           "in basic block.\n");
      destroy_bb_vec_info (bb_vinfo);

  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

  /* Mark all the statements that we want to vectorize as pure SLP and
     relevant.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));

  /* Mark all the statements that we do not want to vectorize.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (BB_VINFO_BB (bb_vinfo));
       !gsi_end_p (gsi); gsi_next (&gsi))
      stmt_vec_info vinfo = vinfo_for_stmt (gsi_stmt (gsi));
      if (STMT_SLP_TYPE (vinfo) != pure_slp)
        STMT_VINFO_VECTORIZABLE (vinfo) = false;

  /* Analyze dependences.  At this point all stmts not participating in
     vectorization have to be marked.  Dependence analysis assumes
     that we either vectorize all SLP instances or none at all.  */
  if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data dependence "
                         "in basic block.\n");
      destroy_bb_vec_info (bb_vinfo);

  if (!vect_verify_datarefs_alignment (bb_vinfo))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported alignment in basic "
                         "block.\n");
      destroy_bb_vec_info (bb_vinfo);

  if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo),
                                    BB_VINFO_TARGET_COST_DATA (bb_vinfo)))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad operation in basic block.\n");
      destroy_bb_vec_info (bb_vinfo);

  /* Cost model: check if the vectorization is worthwhile.  */
  if (!unlimited_cost_model (NULL)
      && !vect_bb_vectorization_profitable_p (bb_vinfo))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization is not "
                         "profitable in basic block.\n");
      destroy_bb_vec_info (bb_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Basic block will be vectorized using SLP\n");
vect_slp_analyze_bb (basic_block bb)
  bb_vec_info bb_vinfo;
  gimple_stmt_iterator gsi;
  unsigned int vector_sizes;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      gimple *stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt)
          && !gimple_nop_p (stmt)
          && gimple_code (stmt) != GIMPLE_LABEL)
        insns++;

  if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: too many instructions in "
                         "basic block.\n");

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

      bb_vinfo = vect_slp_analyze_bb_1 (bb);

      destroy_bb_vec_info (bb_vinfo);

      vector_sizes &= ~current_vector_size;
      if (vector_sizes == 0
          || current_vector_size == 0)

      /* Try the next biggest vector size.  */
      current_vector_size = 1 << floor_log2 (vector_sizes);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "***** Re-trying analysis with "
                         "vector size %d\n", current_vector_size);
/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
   scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

vect_get_constant_vectors (tree op, slp_tree slp_node,
                           vec<tree> *vec_oprnds,
                           unsigned int op_num, unsigned int number_of_vectors,
                           int reduc_index)
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned j, number_of_places_left_in_vector;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  voprnds.create (number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;
  enum tree_code code = gimple_expr_code (stmt);
  gimple_seq ctor_seq = NULL;

  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && reduc_index != -1)
      op_num = reduc_index;
      op = gimple_op (stmt, op_num + 1);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
         we need either neutral operands or the original operands.  See
         get_initial_def_for_reduction() for details.  */
      switch (code)
        case WIDEN_SUM_EXPR:
          if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
            neutral_op = build_real (TREE_TYPE (op), dconst0);
          else
            neutral_op = build_int_cst (TREE_TYPE (op), 0);

          if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
            neutral_op = build_real (TREE_TYPE (op), dconst1);
          else
            neutral_op = build_int_cst (TREE_TYPE (op), 1);

          neutral_op = build_int_cst (TREE_TYPE (op), -1);

          /* For MIN/MAX we don't have an easy neutral operand but
             the initial values can be used fine here.  Only for
             a reduction chain we have to force a neutral element.  */
          if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
              def_stmt = SSA_NAME_DEF_STMT (op);
              loop = (gimple_bb (stmt))->loop_father;
              neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                                  loop_preheader_edge (loop));

          gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
      op = gimple_assign_rhs1 (stmt);

  if (CONSTANT_CLASS_P (op))

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be two).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = nunits * number_of_vectors / group_size;
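  /* Worked instances of the formula above (illustrative): with the first
     example group_size = 2, nunits = 4 and a single output vector give
     4 * 1 / 2 = 2 copies ({s1, s2, s1, s2}); with the second example
     group_size = 8, nunits = 4 and two output vectors give 4 * 2 / 8 = 1,
     i.e. each scalar appears exactly once across {s1..s4} {s5..s8}.  */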
  number_of_places_left_in_vector = nunits;
  elts = XALLOCAVEC (tree, nunits);
  bool place_after_defs = false;
  for (j = 0; j < number_of_copies; j++)
    for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
        op = gimple_assign_rhs1 (stmt);

        if (op_num == 0 || op_num == 1)
            tree cond = gimple_assign_rhs1 (stmt);
            op = TREE_OPERAND (cond, op_num);
        else
            if (op_num == 2)
              op = gimple_assign_rhs2 (stmt);
            else
              op = gimple_assign_rhs3 (stmt);

        op = gimple_call_arg (stmt, op_num);

        op = gimple_op (stmt, op_num + 1);
        /* Unlike the other binary operators, shifts/rotates have
           the shift count being int, instead of the same type as
           the lhs, so make sure the scalar is the right type if
           we are dealing with vectors of
           long long/long/short/char.  */
        if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
          op = fold_convert (TREE_TYPE (vector_type), op);
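        /* Illustrative example (not from the original comment): for a group
           of "long long" shifts such as

             a[0] = b[0] << 3;
             a[1] = b[1] << 3;

           the scalar count 3 is an int; it is converted here to the vector
           element type (long long) so the constant vector {3, 3, ...} can be
           built with matching element types.  */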
        op = gimple_op (stmt, op_num + 1);

        if (reduc_index != -1)
            loop = (gimple_bb (stmt))->loop_father;
            def_stmt = SSA_NAME_DEF_STMT (op);

            /* Get the def before the loop.  In reduction chain we have only
               one initial value.  */
            if ((j != (number_of_copies - 1)
                 || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
                     && i != 0))
                && neutral_op)
              op = neutral_op;
            else
              op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                          loop_preheader_edge (loop));

        /* Create 'vect_ = {op0,op1,...,opn}'.  */
        number_of_places_left_in_vector--;
        if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
            if (CONSTANT_CLASS_P (op))
                op = fold_unary (VIEW_CONVERT_EXPR,
                                 TREE_TYPE (vector_type), op);
                gcc_assert (op && CONSTANT_CLASS_P (op));
            else
                tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
                op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
                init_stmt
                  = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
                gimple_seq_add_stmt (&ctor_seq, init_stmt);

        elts[number_of_places_left_in_vector] = op;
        if (!CONSTANT_CLASS_P (op))
          constant_p = false;
        if (TREE_CODE (orig_op) == SSA_NAME
            && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
            && STMT_VINFO_BB_VINFO (stmt_vinfo)
            && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
                == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
          place_after_defs = true;

        if (number_of_places_left_in_vector == 0)
            number_of_places_left_in_vector = nunits;

            if (constant_p)
              vec_cst = build_vector (vector_type, elts);
            else
                vec<constructor_elt, va_gc> *v;
                vec_alloc (v, nunits);
                for (k = 0; k < nunits; ++k)
                  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
                vec_cst = build_constructor (vector_type, v);

            gimple_stmt_iterator gsi;
            if (place_after_defs)
                gsi = gsi_for_stmt
                        (vect_find_last_scalar_stmt_in_slp (slp_node));
                init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
            else
              init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
            if (ctor_seq != NULL)
                gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
                gsi_insert_seq_before_without_update (&gsi, ctor_seq,
                                                      GSI_SAME_STMT);
            voprnds.quick_push (init);
            place_after_defs = false;

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  while (number_of_vectors > vec_oprnds->length ())
      tree neutral_vec = NULL;

          neutral_vec = build_vector_from_val (vector_type, neutral_op);

          vec_oprnds->quick_push (neutral_vec);

      for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
        vec_oprnds->quick_push (vop);
/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
  gimple *vec_def_stmt;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());

  FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      vec_oprnds->quick_push (vec_oprnd);
/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the corresponding child of SLP_NODE, and we call
   vect_get_slp_vect_defs () to retrieve them.  */
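/* Illustrative sketch (not from the original comment): for an SLP group

     a[0] = b[0] + 3;
     a[1] = b[1] + 3;

   the b[] operand has an SLP child node of loads, so its vector defs are
   fetched from that child via vect_get_slp_vect_defs (), while the invariant
   operand 3 has no child and goes through vect_get_constant_vectors (),
   which builds the constant vector {3, 3, ...}.  */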
vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
                   vec<vec<tree> > *vec_oprnds, int reduc_index)
  int number_of_vects = 0, i;
  unsigned int child_index = 0;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
  slp_tree child = NULL;
  bool vectorized_defs;

  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
  FOR_EACH_VEC_ELT (ops, i, oprnd)
      /* For each operand we check if it has vectorized definitions in a child
         node or we need to create them (for invariants and constants).  We
         check if the LHS of the first stmt of the next child matches OPRND.
         If it does, we found the correct child.  Otherwise, we call
         vect_get_constant_vectors (), and not advance CHILD_INDEX in order
         to check this child node for the next operand.  */
      vectorized_defs = false;
      if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
          child = SLP_TREE_CHILDREN (slp_node)[child_index];

          /* We have to check both pattern and original def, if available.  */
              gimple *first_def = SLP_TREE_SCALAR_STMTS (child)[0];
              gimple *related
                = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));

              if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
                  || (related
                      && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
                  /* The number of vector defs is determined by the number of
                     vector statements in the node from which we get those
                     vectorized stmts.  */
                  number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
                  vectorized_defs = true;

      if (!vectorized_defs)
          number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          /* Number of vector stmts was calculated according to LHS in
             vect_schedule_slp_instance (), fix it by replacing LHS with
             RHS, if necessary.  See vect_get_smallest_scalar_type () for
             details.  */
          vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
                                         &rhs_size_unit);
          if (rhs_size_unit != lhs_size_unit)
              number_of_vects *= rhs_size_unit;
              number_of_vects /= lhs_size_unit;

      /* Allocate memory for vectorized defs.  */
      vec_defs.create (number_of_vects);

      /* For reduction defs we call vect_get_constant_vectors (), since we are
         looking for initial loop invariant values.  */
      if (vectorized_defs && reduc_index == -1)
        /* The defs are already vectorized.  */
        vect_get_slp_vect_defs (child, &vec_defs);
      else
        /* Build vectors from scalar defs.  */
        vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
                                   number_of_vects, reduc_index);

      vec_oprnds->quick_push (vec_defs);

      /* For reductions, we only need initial values.  */
      if (reduc_index != -1)
/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
   building a vector of type MASK_TYPE from it) and two input vectors placed in
   DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
   shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
   the created stmts must be inserted.  */
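/* Illustrative sketch (not from the original comment): with nunits == 4,
   two input vectors v0 = {a0,b0,c0,a1} and v1 = {b1,c1,a2,b2} and the mask
   {0,0,0,3}, the generated statement is roughly

     vect_perm_1 = VEC_PERM_EXPR <v0, v1, {0,0,0,3}>;   (== {a0,a0,a0,a1})

   and for the next copy the same mask is applied to the vectors STRIDE
   entries further along DR_CHAIN.  */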
vect_create_mask_and_perm (gimple *stmt,
                           tree mask, int first_vec_indx, int second_vec_indx,
                           gimple_stmt_iterator *gsi, slp_tree node,
                           tree vectype, vec<tree> dr_chain,
                           int ncopies, int vect_stmts_counter)
  gimple *perm_stmt = NULL;
  tree first_vec, second_vec, data_ref;

  stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;

  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
  for (i = SLP_TREE_VEC_STMTS (node).length ();
       i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
    SLP_TREE_VEC_STMTS (node).quick_push (NULL);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
  for (i = 0; i < ncopies; i++)
      first_vec = dr_chain[first_vec_indx];
      second_vec = dr_chain[second_vec_indx];

      /* Generate the permute statement.  */
      perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
                                       first_vec, second_vec, mask);
      data_ref = make_ssa_name (perm_dest, perm_stmt);
      gimple_set_lhs (perm_stmt, data_ref);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);

      /* Store the vector statement in NODE.  */
      SLP_TREE_VEC_STMTS (node)[stride * i + vect_stmts_counter] = perm_stmt;

      first_vec_indx += stride;
      second_vec_indx += stride;
/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
   return in CURRENT_MASK_ELEMENT its equivalent in target specific
   representation.  Check that the mask is valid and return FALSE if not.
   Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
   the next vector, i.e., the current first vector is not needed.  */

vect_get_mask_element (gimple *stmt, int first_mask_element, int m,
                       int mask_nunits, bool only_one_vec, int index,
                       unsigned char *mask, int *current_mask_element,
                       bool *need_next_vector, int *number_of_mask_fixes,
                       bool *mask_fixed, bool *needs_first_vector)
  /* Convert to target specific representation.  */
  *current_mask_element = first_mask_element + m;
  /* Adjust the value in case it's a mask for second and third vectors.  */
  *current_mask_element -= mask_nunits * (*number_of_mask_fixes - 1);

  if (*current_mask_element < 0)
      if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "permutation requires past vector ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  if (*current_mask_element < mask_nunits)
    *needs_first_vector = true;

  /* We have only one input vector to permute but the mask accesses values in
     the next vector as well.  */
  if (only_one_vec && *current_mask_element >= mask_nunits)
      if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "permutation requires at least two vectors ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  /* The mask requires the next vector.  */
  while (*current_mask_element >= mask_nunits * 2)
      if (*needs_first_vector || *mask_fixed)
          /* We either need the first vector too or have already moved to the
             next vector.  In both cases, this permutation needs three
             vectors.  */
          if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "permutation requires at "
                               "least three vectors ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

      /* We move to the next vector, dropping the first one and working with
         the second and the third - we need to adjust the values of the mask
         accordingly.  */
      *current_mask_element -= mask_nunits * *number_of_mask_fixes;

      for (i = 0; i < index; i++)
        mask[i] -= mask_nunits * *number_of_mask_fixes;

      (*number_of_mask_fixes)++;

  *need_next_vector = *mask_fixed;

  /* This was the last element of this mask.  Start a new one.  */
  if (index == mask_nunits - 1)
      *number_of_mask_fixes = 1;
      *mask_fixed = false;
      *needs_first_vector = false;
/* Generate vector permute statements from a list of loads in DR_CHAIN.
   If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
   permute statements for the SLP node NODE of the SLP instance
   SLP_NODE_INSTANCE.  */

vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
                              gimple_stmt_iterator *gsi, int vf,
                              slp_instance slp_node_instance, bool analyze_only)
  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_element_type = NULL_TREE, mask_type;
  int i, j, k, nunits, vec_index = 0;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
  int first_mask_element;
  int index, unroll_factor, current_mask_element, ncopies;
  unsigned char *mask;
  bool only_one_vec = false, need_next_vector = false;
  int first_vec_index, second_vec_index, orig_vec_stmts_num, vect_stmts_counter;
  int number_of_mask_fixes = 1;
  bool mask_fixed = false;
  bool needs_first_vector = false;

  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));

  mode = TYPE_MODE (vectype);

  if (!can_vec_perm_p (mode, false, NULL))
      if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vect permute for ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  /* The generic VEC_PERM_EXPR code always uses an integral type of the
     same size as the vector element being permuted.  */
  mask_element_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  mask = XALLOCAVEC (unsigned char, nunits);
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
     unrolling factor.  */
  orig_vec_stmts_num
    = (STMT_VINFO_GROUP_SIZE (stmt_info)
       * SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance)
       + nunits - 1) / nunits;
  if (orig_vec_stmts_num == 1)
    only_one_vec = true;

  /* Number of copies is determined by the final vectorization factor
     relatively to SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* Generate permutation masks for every NODE.  Number of masks for each NODE
     is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4.  I.e., we have a
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     ...

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
     The last mask is illegal since we assume two operands for permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */
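  /* Working the same example for the b's (illustrative, not from the original
     comment): the b elements sit at indices 1, 4, 7, 10 of the flattened
     sequence, so the raw masks are {1,1,1,4} {4,4,7,7} {7,10,10,10}; the last
     one is likewise out of range for two operands and becomes {3,6,6,6} once
     the permutation switches to the second and third input vectors.  */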
  vect_stmts_counter = 0;
  first_vec_index = vec_index++;
  if (only_one_vec)
    second_vec_index = first_vec_index;
  else
    second_vec_index = vec_index++;

  for (j = 0; j < unroll_factor; j++)
      for (k = 0; k < group_size; k++)
          i = SLP_TREE_LOAD_PERMUTATION (node)[k];
          first_mask_element = i + j * STMT_VINFO_GROUP_SIZE (stmt_info);
          if (!vect_get_mask_element (stmt, first_mask_element, 0,
                                      nunits, only_one_vec, index,
                                      mask, &current_mask_element,
                                      &need_next_vector,
                                      &number_of_mask_fixes, &mask_fixed,
                                      &needs_first_vector))
            return false;
          gcc_assert (current_mask_element >= 0
                      && current_mask_element < 2 * nunits);
          mask[index++] = current_mask_element;

          if (index == nunits)
              if (!can_vec_perm_p (mode, false, mask))
                  if (dump_enabled_p ())
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported vect permute { ");
                      for (i = 0; i < nunits; ++i)
                        dump_printf (MSG_MISSED_OPTIMIZATION, "%d ", mask[i]);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");

                  tree mask_vec, *mask_elts;
                  mask_elts = XALLOCAVEC (tree, nunits);
                  for (l = 0; l < nunits; ++l)
                    mask_elts[l] = build_int_cst (mask_element_type, mask[l]);
                  mask_vec = build_vector (mask_type, mask_elts);

                  if (need_next_vector)
                      first_vec_index = second_vec_index;
                      second_vec_index = vec_index;

                  vect_create_mask_and_perm (stmt,
                                             mask_vec, first_vec_index,
                                             second_vec_index,
                                             gsi, node, vectype, dr_chain,
                                             ncopies, vect_stmts_counter++);
/* Vectorize SLP instance tree in postorder.  */

vect_schedule_slp_instance (slp_tree node, slp_instance instance,
                            unsigned int vectorization_factor)
  bool grouped_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_schedule_slp_instance (child, instance, vectorization_factor);

  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  Number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
     size.
     Unless this is a SLP reduction in which case the number of vector
     stmts is equal to the number of vector stmts of the children.  */
  if (GROUP_FIRST_ELEMENT (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    vec_stmts_size = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
  else
    vec_stmts_size = (vectorization_factor * group_size) / nunits;

  if (!SLP_TREE_VEC_STMTS (node).exists ())
      SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "------>vectorizing SLP node starting from: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      dump_printf (MSG_NOTE, "\n");

  /* Vectorized stmts go before the last scalar stmt which is where
     all uses are ready.  */
  si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));

  /* Mark the first element of the reduction chain as reduction to properly
     transform the node.  In the analysis phase only the last element of the
     chain is marked as reduction.  */
  if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
      STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;

  /* Handle two-operation SLP nodes by vectorizing the group with
     both operations and then performing a merge.  */
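  /* Illustrative example (not from the original comment): a group such as

       a[0] = b[0] + c[0];
       a[1] = b[1] - c[1];

     is recorded as a two-operator node; it is vectorized below once as all
     PLUS and once as all MINUS, and the two results are then blended with a
     VEC_PERM_EXPR that picks each lane from whichever result matches that
     lane's original operation.  */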
  if (SLP_TREE_TWO_OPERATORS (node))
      enum tree_code code0 = gimple_assign_rhs_code (stmt);
      enum tree_code ocode;
      unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
      bool allsame = true;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
        if (gimple_assign_rhs_code (ostmt) != code0)
            ocode = gimple_assign_rhs_code (ostmt);

          tree tmask = NULL_TREE;
          vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
          v0 = SLP_TREE_VEC_STMTS (node).copy ();
          SLP_TREE_VEC_STMTS (node).truncate (0);
          gimple_assign_set_rhs_code (stmt, ocode);
          vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
          gimple_assign_set_rhs_code (stmt, code0);
          v1 = SLP_TREE_VEC_STMTS (node).copy ();
          SLP_TREE_VEC_STMTS (node).truncate (0);
          tree meltype = build_nonstandard_integer_type
            (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
          tree mvectype = get_same_sized_vectype (meltype, vectype);
          for (j = 0; j < v0.length (); ++j)
              tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
              for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
                  if (k >= group_size)
                    k = 0;
                  melts[l] = build_int_cst
                    (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
              tmask = build_vector (mvectype, melts);

              /* ??? Not all targets support a VEC_PERM_EXPR with a
                 constant mask that would translate to a vec_merge RTX
                 (with their vec_perm_const_ok).  We can either not
                 vectorize in that case or let veclower do its job.
                 Unfortunately that isn't too great and at least for
                 plus/minus we'd eventually like to match targets
                 vector addsub instructions.  */
              vstmt = gimple_build_assign (make_ssa_name (vectype),
                                           VEC_PERM_EXPR,
                                           gimple_assign_lhs (v0[j]),
                                           gimple_assign_lhs (v1[j]), tmask);
              vect_finish_stmt_generation (stmt, vstmt, &si);
              SLP_TREE_VEC_STMTS (node).quick_push (vstmt);

  is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
/* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
   For loop vectorization this is done in vectorizable_call, but for SLP
   it needs to be deferred until end of vect_schedule_slp, because multiple
   SLP instances may refer to the same scalar stmt.  */
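/* Illustrative sketch (not from the original comment): after a group such as

     x_1 = sqrtf (a[0]);
     x_2 = sqrtf (a[1]);

   has been vectorized, each scalar call is rewritten below into a trivial
   assignment like "x_1 = 0.0f"; the zero assignment has no side effects, so
   later cleanup can remove it even in cases where the original call could
   not simply be deleted.  */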
vect_remove_slp_scalar_calls (slp_tree node)
  gimple *stmt, *new_stmt;
  gimple_stmt_iterator gsi;
  stmt_vec_info stmt_info;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_remove_slp_scalar_calls (child);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
      if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
        continue;
      stmt_info = vinfo_for_stmt (stmt);
      if (stmt_info == NULL
          || is_pattern_stmt_p (stmt_info)
          || !PURE_SLP_STMT (stmt_info))
        continue;
      lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi = gsi_for_stmt (stmt);
      gsi_replace (&gsi, new_stmt, false);
      SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
/* Generate vector code for all SLP instances in the loop/basic block.  */

vect_schedule_slp (vec_info *vinfo)
  vec<slp_instance> slp_instances;
  slp_instance instance;
  bool is_store = false;

  slp_instances = vinfo->slp_instances;
  if (is_a <loop_vec_info> (vinfo))
    vf = as_a <loop_vec_info> (vinfo)->vectorization_factor;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
                                             instance, vf);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vectorizing stmts using SLP.\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple_stmt_iterator gsi;

      /* Remove scalar call stmts.  Do not do this for basic-block
         vectorization as not all uses may be vectorized.
         ??? Why should this be necessary?  DCE should be able to
         remove the stmts itself.
         ??? For BB vectorization we can as well remove scalar
         stmts starting from the SLP tree root if they have no
         uses.  */
      if (is_a <loop_vec_info> (vinfo))
        vect_remove_slp_scalar_calls (root);

      for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
                  && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
          if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
            break;

          if (is_pattern_stmt_p (vinfo_for_stmt (store)))
            store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
          /* Free the attached stmt_vec_info and remove the stmt.  */
          gsi = gsi_for_stmt (store);
          unlink_stmt_vdef (store);
          gsi_remove (&gsi, true);
          release_defs (store);
          free_stmt_vec_info (store);
/* Vectorize the basic block.  */

vect_slp_transform_bb (basic_block bb)
  bb_vec_info bb_vinfo = vec_info_for_bb (bb);
  gimple_stmt_iterator si;

  gcc_assert (bb_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB\n");

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
      gimple *stmt = gsi_stmt (si);
      stmt_vec_info stmt_info;

      if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "------>SLPing statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
          dump_printf (MSG_NOTE, "\n");

      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);

      /* Schedule all the SLP instances when the first SLP stmt is reached.  */
      if (STMT_SLP_TYPE (stmt_info))
          vect_schedule_slp (bb_vinfo);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "BASIC BLOCK VECTORIZED\n");

  destroy_bb_vec_info (bb_vinfo);