/* SLP - Basic Block Vectorization
   Copyright (C) 2007-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "tree-pass.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
#include "gimple-walk.h"

/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

vect_free_slp_tree (slp_tree node)

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_free_slp_tree (child);

  SLP_TREE_CHILDREN (node).release ();
  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_VEC_STMTS (node).release ();
  SLP_TREE_LOAD_PERMUTATION (node).release ();

/* Free the memory allocated for the SLP instance.  */

vect_free_slp_instance (slp_instance instance)

  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  SLP_INSTANCE_LOADS (instance).release ();

/* Create an SLP node for SCALAR_STMTS.  */

vect_create_new_slp_node (vec<gimple *> scalar_stmts)

  gimple *stmt = scalar_stmts[0];

  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)

  node = XNEW (struct _slp_tree);
  SLP_TREE_SCALAR_STMTS (node) = scalar_stmts;
  SLP_TREE_VEC_STMTS (node).create (0);
  SLP_TREE_CHILDREN (node).create (nops);
  SLP_TREE_LOAD_PERMUTATION (node) = vNULL;
  SLP_TREE_TWO_OPERATORS (node) = false;
  SLP_TREE_DEF_TYPE (node) = vect_internal_def;
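
/* An illustrative example (array names are arbitrary): for a group of
   isomorphic assignments such as

     a0 = b0 + c0;
     a1 = b1 + c1;

   the node created above records both scalar stmts and reserves NOPS == 2
   child slots, one per RHS operand; the COND_EXPR check adjusts that count
   for a conditional right-hand side.  */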

/* This structure is used in creation of an SLP tree.  Each instance
   corresponds to the same operand in a group of scalar stmts in an SLP
   node.  */
typedef struct _slp_oprnd_info

  /* Def-stmts for the operands.  */
  vec<gimple *> def_stmts;
  /* Information about the first statement, its vector def-type, type, the
     operand itself in case it's constant, and an indication if it's a pattern
     stmt.  */
  enum vect_def_type first_dt;

/* Allocate operands info for NOPS operands, and GROUP_SIZE def-stmts for each
   operand.  */
static vec<slp_oprnd_info>
vect_create_oprnd_info (int nops, int group_size)

  slp_oprnd_info oprnd_info;
  vec<slp_oprnd_info> oprnds_info;

  oprnds_info.create (nops);
  for (i = 0; i < nops; i++)
      oprnd_info = XNEW (struct _slp_oprnd_info);
      oprnd_info->def_stmts.create (group_size);
      oprnd_info->first_dt = vect_uninitialized_def;
      oprnd_info->first_op_type = NULL_TREE;
      oprnd_info->first_pattern = false;
      oprnd_info->second_pattern = false;
      oprnds_info.quick_push (oprnd_info);

/* Free operands info.  */

vect_free_oprnd_info (vec<slp_oprnd_info> &oprnds_info)

  slp_oprnd_info oprnd_info;

  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
      oprnd_info->def_stmts.release ();
      XDELETE (oprnd_info);

  oprnds_info.release ();

/* Find the place of the data-ref in STMT in the interleaving chain that starts
   from FIRST_STMT.  Return -1 if the data-ref is not a part of the chain.  */

vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)

  gimple *next_stmt = first_stmt;

  if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))

      if (next_stmt == stmt)
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
        result += GROUP_GAP (vinfo_for_stmt (next_stmt));
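
/* An illustrative example (array names are arbitrary): for the interleaving
   chain built for the stores a[0], a[1], a[2], a[3], FIRST_STMT is the store
   to a[0] and the place of the store to a[2] is the sum of the GROUP_GAPs
   walked before reaching it; with unit gaps this is simply its index, 2.  */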

/* Get the defs for the rhs of STMT (collect them in OPRNDS_INFO), check that
   they are of a valid type and that they match the defs of the first stmt of
   the SLP group (stored in OPRNDS_INFO).  If there was a fatal error
   return -1, if the error could be corrected by swapping operands of the
   operation return 1, if everything is ok return 0.  */

vect_get_and_check_slp_defs (vec_info *vinfo,
                             gimple *stmt, unsigned stmt_num,
                             vec<slp_oprnd_info> *oprnds_info)

  unsigned int i, number_of_oprnds;
  enum vect_def_type dt = vect_uninitialized_def;
  bool pattern = false;
  slp_oprnd_info oprnd_info;
  int first_op_idx = 1;
  bool commutative = false;
  bool first_op_cond = false;
  bool first = stmt_num == 0;
  bool second = stmt_num == 1;

  if (is_gimple_call (stmt))
      number_of_oprnds = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
      enum tree_code code = gimple_assign_rhs_code (stmt);
      number_of_oprnds = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR
          && COMPARISON_CLASS_P (gimple_assign_rhs1 (stmt)))
          first_op_cond = true;
      commutative = commutative_tree_code (code);

  bool swapped = false;
  for (i = 0; i < number_of_oprnds; i++)
          if (i == 0 || i == 1)
            oprnd = TREE_OPERAND (gimple_op (stmt, first_op_idx),
                                  swapped ? !i : i);
            oprnd = gimple_op (stmt, first_op_idx + i - 1);
        oprnd = gimple_op (stmt, first_op_idx + (swapped ? !i : i));

      oprnd_info = (*oprnds_info)[i];

      if (!vect_is_simple_use (oprnd, vinfo, &def_stmt, &dt))
          if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: can't analyze def for ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         pattern.  */
      if (def_stmt && gimple_bb (def_stmt)
          && vect_stmt_in_region_p (vinfo, def_stmt)
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_RELEVANT (vinfo_for_stmt (def_stmt))
          && !STMT_VINFO_LIVE_P (vinfo_for_stmt (def_stmt)))

          if (!first && !oprnd_info->first_pattern
              /* Allow different pattern state for the defs of the
                 first stmt in reduction chains.  */
              && (oprnd_info->first_dt != vect_reduction_def
                  || (!second && !oprnd_info->second_pattern)))

              if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: some of the stmts"
                                   " are in a pattern, and others are not ");
                  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (dt == vect_unknown_def_type)
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Unsupported pattern.\n");

          switch (gimple_code (def_stmt))
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported defining stmt:\n");

        oprnd_info->second_pattern = pattern;

          oprnd_info->first_dt = dt;
          oprnd_info->first_pattern = pattern;
          oprnd_info->first_op_type = TREE_TYPE (oprnd);

          /* Not first stmt of the group, check that the def-stmt/s match
             the def-stmt/s of the first stmt.  Allow different definition
             types for reduction chains: the first stmt must be a
             vect_reduction_def (a phi node), and the rest
             vect_internal_def.  */
          if (((oprnd_info->first_dt != dt
                && !(oprnd_info->first_dt == vect_reduction_def
                     && dt == vect_internal_def)
                && !((oprnd_info->first_dt == vect_external_def
                      || oprnd_info->first_dt == vect_constant_def)
                     && (dt == vect_external_def
                         || dt == vect_constant_def)))
               || !types_compatible_p (oprnd_info->first_op_type,
                                       TREE_TYPE (oprnd)))
              /* Try swapping operands if we got a mismatch.  */

              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Build SLP failed: different types\n");

      /* Check the types of the definitions.  */
      case vect_constant_def:
      case vect_external_def:
      case vect_reduction_def:

      case vect_internal_def:
        oprnd_info->def_stmts.quick_push (def_stmt);

        /* FORNOW: Not supported.  */
        if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: illegal type of def ");
            dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
            dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

      tree cond = gimple_assign_rhs1 (stmt);
      swap_ssa_operands (stmt, &TREE_OPERAND (cond, 0),
                         &TREE_OPERAND (cond, 1));
      TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));

      swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                         gimple_assign_rhs2_ptr (stmt));
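
/* An illustrative example of the swapping above (variable names arbitrary):
   if the first stmt of the group is  a0 = x + y  while a later stmt is
   a1 = w + z  with the defs of w and z matching the first stmt only in the
   opposite order, the mismatch is reported as correctable (return value 1),
   and for a commutative operation the rhs1/rhs2 of the offending stmts can
   then be swapped so the operand columns line up again.  */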

/* Verify if the scalar stmts STMTS are isomorphic, require data
   permutation or are of unsupported types of operation.  Return
   true if they are, otherwise return false and indicate in *MATCHES
   which stmts are not isomorphic to the first one.  If MATCHES[0]
   is false then this indicates the comparison could not be
   carried out or the stmts will never be vectorized by SLP.  */

vect_build_slp_tree_1 (vec_info *vinfo,
                       vec<gimple *> stmts, unsigned int group_size,
                       unsigned nops, unsigned int *max_nunits,
                       bool *matches, bool *two_operators)

  gimple *first_stmt = stmts[0], *stmt = stmts[0];
  enum tree_code first_stmt_code = ERROR_MARK;
  enum tree_code alt_stmt_code = ERROR_MARK;
  enum tree_code rhs_code = ERROR_MARK;
  enum tree_code first_cond_code = ERROR_MARK;
  bool need_same_oprnds = false;
  tree vectype = NULL_TREE, scalar_type, first_op1 = NULL_TREE;
  machine_mode optab_op2_mode;
  machine_mode vec_mode;
  gimple *first_load = NULL, *prev_first_load = NULL;

  /* For every stmt in NODE find its def stmt/s.  */
  FOR_EACH_VEC_ELT (stmts, i, stmt)
      if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
          if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unvectorizable statement ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
          /* Fatal mismatch.  */

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
          if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: not GIMPLE_ASSIGN nor "
                               "GIMPLE_CALL ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
          /* Fatal mismatch.  */

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
          if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
          /* Fatal mismatch.  */

      /* If populating the vector type requires unrolling then fail
         before adjusting *max_nunits for basic-block vectorization.  */
      if (is_a <bb_vec_info> (vinfo)
          && TYPE_VECTOR_SUBPARTS (vectype) > group_size)
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unrolling required "
                           "in basic block SLP\n");
          /* Fatal mismatch.  */

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);

      if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
          rhs_code = CALL_EXPR;
          if (gimple_call_internal_p (call_stmt)
              || gimple_call_tail_p (call_stmt)
              || gimple_call_noreturn_p (call_stmt)
              || !gimple_call_nothrow_p (call_stmt)
              || gimple_call_chain (call_stmt))
              if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: unsupported call type ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    call_stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
              /* Fatal mismatch.  */

        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

                  || optab_handler (optab, vec_mode) == CODE_FOR_nothing)
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: no optab.\n");
                      /* Fatal mismatch.  */

                  icode = (int) optab_handler (optab, vec_mode);
                  if (icode == CODE_FOR_nothing)
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                         "Build SLP failed: "
                                         "op not supported by target.\n");
                      /* Fatal mismatch.  */

                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);

          else if (rhs_code == WIDEN_LSHIFT_EXPR)
              need_same_oprnds = true;
              first_op1 = gimple_assign_rhs2 (stmt);

          if (first_stmt_code != rhs_code
              && alt_stmt_code == ERROR_MARK)
            alt_stmt_code = rhs_code;
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR)
              /* Handle mismatches in plus/minus by computing both
                 and merging the results.  */
              && !((first_stmt_code == PLUS_EXPR
                    || first_stmt_code == MINUS_EXPR)
                   && (alt_stmt_code == PLUS_EXPR
                       || alt_stmt_code == MINUS_EXPR)
                   && rhs_code == alt_stmt_code)
              && !(STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
                   && (first_stmt_code == ARRAY_REF
                       || first_stmt_code == BIT_FIELD_REF
                       || first_stmt_code == INDIRECT_REF
                       || first_stmt_code == COMPONENT_REF
                       || first_stmt_code == MEM_REF)))
              if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different operation "
                                   "in stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "original stmt ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    first_stmt, 0);

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
              if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: different shift "
                                   "arguments in ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

          if (rhs_code == CALL_EXPR)
              gimple *first_stmt = stmts[0];
              if (gimple_call_num_args (stmt) != nops
                  || !operand_equal_p (gimple_call_fn (first_stmt),
                                       gimple_call_fn (stmt), 0)
                  || gimple_call_fntype (first_stmt)
                     != gimple_call_fntype (stmt))
                  if (dump_enabled_p ())
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different calls in ");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

      /* Grouped store or load.  */
      if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
          if (REFERENCE_CLASS_P (lhs))
              first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));

                  /* Check that there are no loads from different interleaving
                     chains in the same node.  */
                  if (prev_first_load != first_load)
                      if (dump_enabled_p ())
                          dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                           vect_location,
                                           "Build SLP failed: different "
                                           "interleaving chains in one node ");
                          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                            stmt, 0);
                          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

                prev_first_load = first_load;
        } /* Grouped access.  */

          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
              /* Not grouped load.  */
              if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: not grouped load ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

              /* FORNOW: Not grouped loads are not supported.  */
              /* Fatal mismatch.  */

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary
              && TREE_CODE_CLASS (rhs_code) != tcc_expression
              && TREE_CODE_CLASS (rhs_code) != tcc_comparison
              && rhs_code != CALL_EXPR)
              if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "Build SLP failed: operation");
                  dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
              /* Fatal mismatch.  */

          if (rhs_code == COND_EXPR)
              tree cond_expr = gimple_assign_rhs1 (stmt);

                first_cond_code = TREE_CODE (cond_expr);
              else if (first_cond_code != TREE_CODE (cond_expr))
                  if (dump_enabled_p ())
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                       "Build SLP failed: different"
                                       " operation");
                      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                        stmt, 0);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  for (i = 0; i < group_size; ++i)

  /* If we allowed a two-operation SLP node verify the target can cope
     with the permute we are going to use.  */
  if (alt_stmt_code != ERROR_MARK
      && TREE_CODE_CLASS (alt_stmt_code) != tcc_reference)
      unsigned char *sel
        = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (vectype));
      for (i = 0; i < TYPE_VECTOR_SUBPARTS (vectype); ++i)
          if (gimple_assign_rhs_code (stmts[i % group_size]) == alt_stmt_code)
            sel[i] += TYPE_VECTOR_SUBPARTS (vectype);

      if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
          for (i = 0; i < group_size; ++i)
            if (gimple_assign_rhs_code (stmts[i]) == alt_stmt_code)
                if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "Build SLP failed: different operation "
                                     "in stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      stmts[i], 0);
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "original stmt ");
                    dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                      first_stmt, 0);

      *two_operators = true;
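
/* An illustrative example of the two-operator case above (names arbitrary):
   the group

     a0 = b0 + c0;
     a1 = b1 - c1;

   mixes PLUS_EXPR and MINUS_EXPR.  Both vector results are computed and
   then blended with a permutation selecting lanes of the first operation
   where the scalar stmt used it and lanes of the alternate operation
   elsewhere, which is why can_vec_perm_p is queried for the mask built in
   SEL before *TWO_OPERATORS is set.  */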

/* Recursively build an SLP tree starting from NODE.
   Fail (and return a value not equal to zero) if def-stmts are not
   isomorphic, require data permutation or are of unsupported types of
   operation.  Otherwise, return 0.
   The value returned is the depth in the SLP tree where a mismatch
   was found.  */

vect_build_slp_tree (vec_info *vinfo,
                     slp_tree *node, unsigned int group_size,
                     unsigned int *max_nunits,
                     vec<slp_tree> *loads,
                     bool *matches, unsigned *npermutes, unsigned *tree_size,
                     unsigned max_tree_size)

  unsigned nops, i, this_tree_size = 0;

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];
  if (is_gimple_call (stmt))
    nops = gimple_call_num_args (stmt);
  else if (is_gimple_assign (stmt))
      nops = gimple_num_ops (stmt) - 1;
      if (gimple_assign_rhs_code (stmt) == COND_EXPR)

  bool two_operators = false;
  if (!vect_build_slp_tree_1 (vinfo,
                              SLP_TREE_SCALAR_STMTS (*node), group_size, nops,
                              max_nunits, matches, &two_operators))
  SLP_TREE_TWO_OPERATORS (*node) = two_operators;

  /* If the SLP node is a load, terminate the recursion.  */
  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
      && DR_IS_READ (STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))))
      loads->safe_push (*node);

  /* Get at the operands, verifying they are compatible.  */
  vec<slp_oprnd_info> oprnds_info = vect_create_oprnd_info (nops, group_size);
  slp_oprnd_info oprnd_info;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (*node), i, stmt)
      switch (vect_get_and_check_slp_defs (vinfo, stmt, i, &oprnds_info))
          vect_free_oprnd_info (oprnds_info);

          for (i = 0; i < group_size; ++i)
          vect_free_oprnd_info (oprnds_info);

  stmt = SLP_TREE_SCALAR_STMTS (*node)[0];

  /* Create SLP_TREE nodes for the definition node/s.  */
  FOR_EACH_VEC_ELT (oprnds_info, i, oprnd_info)
      unsigned old_nloads = loads->length ();
      unsigned old_max_nunits = *max_nunits;

      if (oprnd_info->first_dt != vect_internal_def)

      if (++this_tree_size > max_tree_size)
          vect_free_oprnd_info (oprnds_info);

      child = vect_create_new_slp_node (oprnd_info->def_stmts);
          vect_free_oprnd_info (oprnds_info);

      if (vect_build_slp_tree (vinfo, &child,
                               group_size, max_nunits, loads, matches,
                               npermutes, &this_tree_size, max_tree_size))
          /* If we have all children of child built up from scalars then just
             throw that away and build it up this node from scalars.  */
          if (!SLP_TREE_CHILDREN (child).is_empty ())
              FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)

                  *max_nunits = old_max_nunits;
                  loads->truncate (old_nloads);
                  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                    vect_free_slp_tree (grandchild);
                  SLP_TREE_CHILDREN (child).truncate (0);

                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "Building parent vector operands from "
                                     "scalars instead\n");
                  oprnd_info->def_stmts = vNULL;
                  SLP_TREE_DEF_TYPE (child) = vect_external_def;
                  SLP_TREE_CHILDREN (*node).quick_push (child);

          oprnd_info->def_stmts = vNULL;
          SLP_TREE_CHILDREN (*node).quick_push (child);

      /* If the SLP build failed fatally and we analyze a basic-block
         simply treat nodes we fail to build as externally defined
         (and thus build vectors from the scalar defs).
         The cost model will reject outright expensive cases.
         ??? This doesn't treat cases where permutation ultimately
         fails (or we don't try permutation below).  Ideally we'd
         even compute a permutation that will end up with the maximum
         SLP tree size...  */
      if (is_a <bb_vec_info> (vinfo)
          /* ??? Rejecting patterns this way doesn't work.  We'd have to
             do extra work to cancel the pattern so the uses see the
             scalar version.  */
          && !is_pattern_stmt_p (vinfo_for_stmt (stmt)))

          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

            dump_printf_loc (MSG_NOTE, vect_location,
                             "Building vector operands from scalars\n");
          oprnd_info->def_stmts = vNULL;
          SLP_TREE_DEF_TYPE (child) = vect_external_def;
          SLP_TREE_CHILDREN (*node).quick_push (child);

      /* If the SLP build for operand zero failed and operand zero
         and one can be commutated try that for the scalar stmts
         that failed the match.  */
          /* A first scalar stmt mismatch signals a fatal mismatch.  */
          /* ??? For COND_EXPRs we can swap the comparison operands
             as well as the arms under some constraints.  */
          && oprnds_info[1]->first_dt == vect_internal_def
          && is_gimple_assign (stmt)
          && commutative_tree_code (gimple_assign_rhs_code (stmt))
          && !SLP_TREE_TWO_OPERATORS (*node)
          /* Do so only if the number of not successful permutes was no more
             than a cut-off as re-trying the recursive match on
             possibly each level of the tree would expose exponential
             behavior.  */

          slp_tree grandchild;

          *max_nunits = old_max_nunits;
          loads->truncate (old_nloads);
          FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
            vect_free_slp_tree (grandchild);
          SLP_TREE_CHILDREN (child).truncate (0);

          /* Swap mismatched definition stmts.  */
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Re-trying with swapped operands of stmts ");
          for (j = 0; j < group_size; ++j)
              std::swap (oprnds_info[0]->def_stmts[j],
                         oprnds_info[1]->def_stmts[j]);
              dump_printf (MSG_NOTE, "%d ", j);
          dump_printf (MSG_NOTE, "\n");
          /* And try again with scratch 'matches' ... */
          bool *tem = XALLOCAVEC (bool, group_size);
          if (vect_build_slp_tree (vinfo, &child,
                                   group_size, max_nunits, loads,
                                   tem, npermutes, &this_tree_size,
                                   max_tree_size))
              /* ... so if successful we can apply the operand swapping
                 to the GIMPLE IL.  This is necessary because for example
                 vect_get_slp_defs uses operand indexes and thus expects
                 canonical operand order.  This is also necessary even
                 if we end up building the operand from scalars as
                 we'll continue to process swapped operand two.  */
              for (j = 0; j < group_size; ++j)
                  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                  gimple_set_plf (stmt, GF_PLF_1, false);
              for (j = 0; j < group_size; ++j)
                  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                    /* Avoid swapping operands twice.  */
                    if (gimple_plf (stmt, GF_PLF_1))
                    swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt),
                                       gimple_assign_rhs2_ptr (stmt));
                    gimple_set_plf (stmt, GF_PLF_1, true);
              /* Verify we swap all duplicates or none.  */
              for (j = 0; j < group_size; ++j)
                  gimple *stmt = SLP_TREE_SCALAR_STMTS (*node)[j];
                  gcc_assert (gimple_plf (stmt, GF_PLF_1) == ! matches[j]);

              /* If we have all children of child built up from scalars then
                 just throw that away and build it up this node from scalars.  */
              if (!SLP_TREE_CHILDREN (child).is_empty ())
                  slp_tree grandchild;

                  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                    if (SLP_TREE_DEF_TYPE (grandchild) == vect_internal_def)

                      *max_nunits = old_max_nunits;
                      loads->truncate (old_nloads);
                      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (child), j, grandchild)
                        vect_free_slp_tree (grandchild);
                      SLP_TREE_CHILDREN (child).truncate (0);

                        dump_printf_loc (MSG_NOTE, vect_location,
                                         "Building parent vector operands from "
                                         "scalars instead\n");
                      oprnd_info->def_stmts = vNULL;
                      SLP_TREE_DEF_TYPE (child) = vect_external_def;
                      SLP_TREE_CHILDREN (*node).quick_push (child);

              oprnd_info->def_stmts = vNULL;
              SLP_TREE_CHILDREN (*node).quick_push (child);

      oprnd_info->def_stmts = vNULL;
      vect_free_slp_tree (child);
      vect_free_oprnd_info (oprnds_info);

  *tree_size += this_tree_size;

  vect_free_oprnd_info (oprnds_info);
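
/* For illustration (names arbitrary): a basic block such as

     t0 = b[0] + c[0];  a[0] = t0;
     t1 = b[1] + c[1];  a[1] = t1;
     t2 = b[2] + c[2];  a[2] = t2;
     t3 = b[3] + c[3];  a[3] = t3;

   yields a root SLP node holding the four stores, with one child node for
   the four additions, which in turn has two children: a load node for b[]
   and a load node for c[] (the load nodes terminate the recursion above).  */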

/* Dump a slp tree NODE using flags specified in DUMP_KIND.  */

vect_print_slp_tree (int dump_kind, location_t loc, slp_tree node)

  dump_printf_loc (dump_kind, loc, "node%s\n",
                   SLP_TREE_DEF_TYPE (node) != vect_internal_def
                   ? " (external)" : "");
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
      dump_printf_loc (dump_kind, loc, "\tstmt %d ", i);
      dump_gimple_stmt (dump_kind, TDF_SLIM, stmt, 0);
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_print_slp_tree (dump_kind, loc, child);

/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts (child, mark, j);

/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

vect_mark_slp_stmts_relevant (slp_tree node)

  stmt_vec_info stmt_info;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_mark_slp_stmts_relevant (child);

/* Rearrange the statements of NODE according to PERMUTATION.  */

vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          vec<unsigned> permutation)

  vec<gimple *> tmp_stmts;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_slp_rearrange_stmts (child, group_size, permutation);

  gcc_assert (group_size == SLP_TREE_SCALAR_STMTS (node).length ());
  tmp_stmts.create (group_size);
  tmp_stmts.quick_grow_cleared (group_size);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    tmp_stmts[permutation[i]] = stmt;

  SLP_TREE_SCALAR_STMTS (node).release ();
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;

/* Attempt to reorder stmts in a reduction chain so that we don't
   require any load permutation.  Return true if that was possible,
   otherwise return false.  */

vect_attempt_slp_rearrange_stmts (slp_instance slp_instn)

  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  slp_tree node, load;

  /* Compare all the permutation sequences to the first one.  We know
     that at least one load is permuted.  */
  node = SLP_INSTANCE_LOADS (slp_instn)[0];
  if (!node->load_permutation.exists ())
  for (i = 1; SLP_INSTANCE_LOADS (slp_instn).iterate (i, &load); ++i)
      if (!load->load_permutation.exists ())
      FOR_EACH_VEC_ELT (load->load_permutation, j, lidx)
        if (lidx != node->load_permutation[j])

  /* Check that the loads in the first sequence are different and there
     are no gaps between them.  */
  load_index = sbitmap_alloc (group_size);
  bitmap_clear (load_index);
  FOR_EACH_VEC_ELT (node->load_permutation, i, lidx)
      if (lidx >= group_size)
      if (bitmap_bit_p (load_index, lidx))
          sbitmap_free (load_index);
      bitmap_set_bit (load_index, lidx);
  for (i = 0; i < group_size; i++)
    if (!bitmap_bit_p (load_index, i))
        sbitmap_free (load_index);
  sbitmap_free (load_index);

  /* This permutation is valid for reduction.  Since the order of the
     statements in the nodes is not important unless they are memory
     accesses, we can rearrange the statements in all the nodes
     according to the order of the loads.  */
  vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                            node->load_permutation);

  /* We are done, no actual permutations need to be generated.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    SLP_TREE_LOAD_PERMUTATION (node).release ();
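
/* For illustration (names arbitrary): a reduction chain that sums
   b[1], b[0], b[3], b[2] in that order carries the load permutation
   {1, 0, 3, 2}.  Because the reduction is order-independent, the scalar
   stmts of every node can simply be reordered to follow the loads and the
   recorded load permutations dropped, which is what the two steps above
   do.  */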

/* Check if the required load permutations in the SLP instance
   SLP_INSTN are supported.  */

vect_supported_load_permutation_p (slp_instance slp_instn)

  unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
  unsigned int i, j, k, next;
  gimple *stmt, *load, *next_load;

  if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location, "Load permutation ");
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
        if (node->load_permutation.exists ())
          FOR_EACH_VEC_ELT (node->load_permutation, j, next)
            dump_printf (MSG_NOTE, "%d ", next);
          for (k = 0; k < group_size; ++k)
            dump_printf (MSG_NOTE, "%d ", k);
      dump_printf (MSG_NOTE, "\n");

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     grouped stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  /* ??? Can't we assert this? */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (SLP_TREE_SCALAR_STMTS (node).length () != (unsigned) group_size)

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];

  /* Reduction (there are no data-refs in the root).
     In reduction chain the order of the loads is not important.  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
      if (vect_attempt_slp_rearrange_stmts (slp_instn))

      /* Fallthru to general load permutation handling.  */

  /* In basic block vectorization we allow any subchain of an interleaving
     chain.
     FORNOW: not supported in loop SLP because of realignment complications.  */
  if (STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)))
      /* Check whether the loads in an instance form a subchain and thus
         no permutation is necessary.  */
      FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
          if (!SLP_TREE_LOAD_PERMUTATION (node).exists ())
          bool subchain_p = true;
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), j, load)
              if (j != 0
                  && (next_load != load
                      || GROUP_GAP (vinfo_for_stmt (load)) != 1))
              next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
            SLP_TREE_LOAD_PERMUTATION (node).release ();

              /* Verify the permutation can be generated.  */
              if (!vect_transform_slp_perm_load (node, tem, NULL,
                                                 1, slp_instn, true))
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                   vect_location,
                                   "unsupported load permutation\n");

  /* For loop vectorization verify we can generate the permutation.  */
  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
    if (node->load_permutation.exists ()
        && !vect_transform_slp_perm_load
              (node, node->load_permutation, NULL,
               SLP_INSTANCE_UNROLLING_FACTOR (slp_instn), slp_instn, true))
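
/* An illustrative example of the basic-block subchain case above (array
   names arbitrary): if the interleaving chain covers b[0]..b[7] but one SLP
   node only loads b[2], b[3], b[4], b[5] in that order, the loads form a
   contiguous subchain with gaps of 1, so the recorded load permutation can
   be dropped and no runtime permute is needed.  */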

/* Find the last store in SLP INSTANCE.  */

vect_find_last_scalar_stmt_in_slp (slp_tree node)

  gimple *last = NULL, *stmt;

  for (int i = 0; SLP_TREE_SCALAR_STMTS (node).iterate (i, &stmt); i++)
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      if (is_pattern_stmt_p (stmt_vinfo))
        last = get_later_stmt (STMT_VINFO_RELATED_STMT (stmt_vinfo), last);
        last = get_later_stmt (stmt, last);

/* Compute the cost for the SLP node NODE in the SLP instance INSTANCE.  */

vect_analyze_slp_cost_1 (slp_instance instance, slp_tree node,
                         stmt_vector_for_cost *prologue_cost_vec,
                         stmt_vector_for_cost *body_cost_vec,
                         unsigned ncopies_for_cost)

  stmt_vec_info stmt_info;

  /* Recurse down the SLP tree.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
      vect_analyze_slp_cost_1 (instance, child, prologue_cost_vec,
                               body_cost_vec, ncopies_for_cost);

  /* Look at the first scalar stmt to determine the cost.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
      if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)))
        vect_model_store_cost (stmt_info, ncopies_for_cost, false,
                               vect_uninitialized_def,
                               node, prologue_cost_vec, body_cost_vec);

          gcc_checking_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)));
          if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
              /* If the load is permuted then the alignment is determined by
                 the first group element not by the first scalar stmt DR.  */
              stmt = GROUP_FIRST_ELEMENT (stmt_info);
              stmt_info = vinfo_for_stmt (stmt);
              /* Record the cost for the permutation.  */
              record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                                stmt_info, 0, vect_body);
              /* And adjust the number of loads performed.  */
              unsigned nunits
                = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
              ncopies_for_cost
                = (GROUP_SIZE (stmt_info) - GROUP_GAP (stmt_info)
                   + nunits - 1) / nunits;
              ncopies_for_cost *= SLP_INSTANCE_UNROLLING_FACTOR (instance);
          /* Record the cost for the vector loads.  */
          vect_model_load_cost (stmt_info, ncopies_for_cost, false,
                                node, prologue_cost_vec, body_cost_vec);

      record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                        stmt_info, 0, vect_body);
      if (SLP_TREE_TWO_OPERATORS (node))
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vector_stmt,
                            stmt_info, 0, vect_body);
          record_stmt_cost (body_cost_vec, ncopies_for_cost, vec_perm,
                            stmt_info, 0, vect_body);

  /* Push SLP node def-type to stmts.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);

  /* Scan operands and account for prologue cost of constants/externals.
     ??? This over-estimates cost for multiple uses and should be
     re-engineered.  */
  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  lhs = gimple_get_lhs (stmt);
  for (i = 0; i < gimple_num_ops (stmt); ++i)
      tree op = gimple_op (stmt, i);
      enum vect_def_type dt;
      if (!op || op == lhs)
      if (vect_is_simple_use (op, stmt_info->vinfo, &def_stmt, &dt))
          /* Without looking at the actual initializer a vector of
             constants can be implemented as load from the constant pool.
             ??? We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          if (dt == vect_constant_def)
            record_stmt_cost (prologue_cost_vec, 1, vector_load,
                              stmt_info, 0, vect_prologue);
          else if (dt == vect_external_def)
            record_stmt_cost (prologue_cost_vec, 1, vec_construct,
                              stmt_info, 0, vect_prologue);

  /* Restore stmt def-types.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;

/* Compute the cost for the SLP instance INSTANCE.  */

vect_analyze_slp_cost (slp_instance instance, void *data)

  stmt_vector_for_cost body_cost_vec, prologue_cost_vec;
  unsigned ncopies_for_cost;
  stmt_info_for_cost *si;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_analyze_slp_cost ===\n");

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  unsigned group_size = SLP_INSTANCE_GROUP_SIZE (instance);
  slp_tree node = SLP_INSTANCE_TREE (instance);
  stmt_vec_info stmt_info = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
  /* Adjust the group_size by the vectorization factor which is always one
     for basic-block vectorization.  */
  if (STMT_VINFO_LOOP_VINFO (stmt_info))
    group_size *= LOOP_VINFO_VECT_FACTOR (STMT_VINFO_LOOP_VINFO (stmt_info));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
  /* For reductions look at a reduction operand in case the reduction
     operation is widening like DOT_PROD or SAD.  */
  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
      switch (gimple_assign_rhs_code (stmt))
          nunits = TYPE_VECTOR_SUBPARTS (get_vectype_for_scalar_type
                                           (TREE_TYPE (gimple_assign_rhs1 (stmt))));
  ncopies_for_cost = least_common_multiple (nunits, group_size) / nunits;

  prologue_cost_vec.create (10);
  body_cost_vec.create (10);
  vect_analyze_slp_cost_1 (instance, SLP_INSTANCE_TREE (instance),
                           &prologue_cost_vec, &body_cost_vec,
                           ncopies_for_cost);

  /* Record the prologue costs, which were delayed until we were
     sure that SLP was successful.  */
  FOR_EACH_VEC_ELT (prologue_cost_vec, i, si)
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_prologue);

  /* Record the instance's instructions in the target cost model.  */
  FOR_EACH_VEC_ELT (body_cost_vec, i, si)
      struct _stmt_vec_info *stmt_info
        = si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
      (void) add_stmt_cost (data, si->count, si->kind, stmt_info,
                            si->misalign, vect_body);

  prologue_cost_vec.release ();
  body_cost_vec.release ();
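
/* A small worked example of the arithmetic above: with a group of 8 stores
   of 32-bit elements and 128-bit vectors, NUNITS is 4, so
   ncopies_for_cost = lcm (4, 8) / 4 = 2 vector stmts per SLP node; with a
   group of 2 and the same vectors, lcm (4, 2) / 4 = 1, since the group fits
   in a single vector (after unrolling in the loop case).  */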

/* Splits a group of stores, currently beginning at FIRST_STMT, into two groups:
   one (still beginning at FIRST_STMT) of size GROUP1_SIZE (also containing
   the first GROUP1_SIZE stmts, since stores are consecutive), the second
   containing the remainder.
   Return the first stmt in the second group.  */

vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)

  stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
  gcc_assert (GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
  gcc_assert (group1_size > 0);
  int group2_size = GROUP_SIZE (first_vinfo) - group1_size;
  gcc_assert (group2_size > 0);
  GROUP_SIZE (first_vinfo) = group1_size;

  gimple *stmt = first_stmt;
  for (unsigned i = group1_size; i > 1; i--)
      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
  /* STMT is now the last element of the first group.  */
  gimple *group2 = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
  GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;

  GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
  for (stmt = group2; stmt; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);

  /* For the second group, the GROUP_GAP is that before the original group,
     plus skipping over the first vector.  */
  GROUP_GAP (vinfo_for_stmt (group2)) =
    GROUP_GAP (first_vinfo) + group1_size;

  /* GROUP_GAP of the first group now has to skip over the second group too.  */
  GROUP_GAP (first_vinfo) += group2_size;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
                     group1_size, group2_size);
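
/* A small worked example of the split: for a store group of size 8 split
   with GROUP1_SIZE == 4, the second group starts at the fifth store with
   GROUP_SIZE 4 and a GROUP_GAP of the original gap plus 4, while the first
   group keeps GROUP_SIZE 4 and has its GROUP_GAP increased by 4, so that
   each group steps over the other group's elements.  */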

/* Analyze an SLP instance starting from a group of grouped stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

vect_analyze_slp_instance (vec_info *vinfo,
                           gimple *stmt, unsigned max_tree_size)

  slp_instance new_instance;
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  unsigned int max_nunits = 0;
  vec<slp_tree> loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
  vec<gimple *> scalar_stmts;

  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
          scalar_type = TREE_TYPE (DR_REF (dr));
          vectype = get_vectype_for_scalar_type (scalar_type);

          gcc_assert (is_a <loop_vec_info> (vinfo));
          vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));

      gcc_assert (is_a <loop_vec_info> (vinfo));
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = as_a <loop_vec_info> (vinfo)->reductions.length ();

      if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Build SLP failed: unsupported data-type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
  if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Build SLP failed: unrolling required in basic"
                         " block SLP\n");

  /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
  scalar_stmts.create (group_size);
  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
          if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
              && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
            scalar_stmts.safe_push (
                  STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
            scalar_stmts.safe_push (next);
          next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
      /* Mark the first element of the reduction chain as reduction to properly
         transform the node.  In the reduction analysis phase only the last
         element of the chain is marked as reduction.  */
      if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;

      /* Collect reduction statements.  */
      vec<gimple *> reductions = as_a <loop_vec_info> (vinfo)->reductions;
      for (i = 0; reductions.iterate (i, &next); i++)
        scalar_stmts.safe_push (next);

  node = vect_create_new_slp_node (scalar_stmts);

  loads.create (group_size);

  /* Build the tree for the SLP instance.  */
  bool *matches = XALLOCAVEC (bool, group_size);
  unsigned npermutes = 0;
  if (vect_build_slp_tree (vinfo, &node, group_size,
                           &max_nunits, &loads,
                           matches, &npermutes, NULL, max_tree_size))
      /* Calculate the unrolling factor based on the smallest type.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      if (unrolling_factor != 1 && is_a <bb_vec_info> (vinfo))
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Build SLP failed: unrolling required in basic"
                             " block SLP\n");
          vect_free_slp_tree (node);

      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_LOADS (new_instance) = loads;

      /* Compute the load permutation.  */
      bool loads_permuted = false;
      FOR_EACH_VEC_ELT (loads, i, load_node)
          vec<unsigned> load_permutation;
          gimple *load, *first_stmt;
          bool this_load_permuted = false;
          load_permutation.create (group_size);
          first_stmt = GROUP_FIRST_ELEMENT
              (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
          FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
              int load_place
                = vect_get_place_in_interleaving_chain (load, first_stmt);
              gcc_assert (load_place != -1);
              if (load_place != j)
                this_load_permuted = true;
              load_permutation.safe_push (load_place);
          if (!this_load_permuted
              /* The load requires permutation when unrolling exposes
                 a gap either because the group is larger than the SLP
                 group-size or because there is a gap between the groups.  */
              && (unrolling_factor == 1
                  || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
                      && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
              load_permutation.release ();
          SLP_TREE_LOAD_PERMUTATION (load_node) = load_permutation;
          loads_permuted = true;

      if (!vect_supported_load_permutation_p (new_instance))
          if (dump_enabled_p ())
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Build SLP failed: unsupported load "
                               "permutation ");
              dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
          vect_free_slp_instance (new_instance);

      vinfo->slp_instances.safe_push (new_instance);

      if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "Final SLP tree for instance:\n");
          vect_print_slp_tree (MSG_NOTE, vect_location, node);

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);

  /* For basic block SLP, try to break the group up into multiples of the
     vector size.  */
  if (is_a <bb_vec_info> (vinfo)
      && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
      && STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
      /* We consider breaking the group only on VF boundaries from the existing
         start.  */
      for (i = 0; i < group_size; i++)
        if (!matches[i]) break;

      if (i >= nunits && i < group_size)
          /* Split into two groups at the first vector boundary before i.  */
          gcc_assert ((nunits & (nunits - 1)) == 0);
          unsigned group1_size = i & ~(nunits - 1);

          gimple *rest = vect_split_slp_store_group (stmt, group1_size);
          bool res = vect_analyze_slp_instance (vinfo, stmt, max_tree_size);
          /* If the first non-match was in the middle of a vector,
             skip the rest of that vector.  */
          if (group1_size < i)
              i = group1_size + nunits;
                rest = vect_split_slp_store_group (rest, nunits);
          res |= vect_analyze_slp_instance (vinfo, rest, max_tree_size);

  /* Even though the first vector did not all match, we might be able to SLP
     (some) of the remainder.  FORNOW ignore this possibility.  */
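
/* For illustration of the unrolling-factor check above: a basic-block group
   of 2 stores of 32-bit elements with 128-bit vectors gives
   lcm (4, 2) / 2 = 2, i.e. the group would have to be unrolled to fill a
   vector; a basic block cannot be unrolled, so such an instance is
   rejected, whereas loop SLP can absorb the factor into the loop's
   unrolling.  */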

/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)

  gimple *first_element;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");

  /* Find SLP sequences starting from groups of grouped stores.  */
  FOR_EACH_VEC_ELT (vinfo->grouped_stores, i, first_element)
    if (vect_analyze_slp_instance (vinfo, first_element, max_tree_size))

  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
      if (loop_vinfo->reduction_chains.length () > 0)
          /* Find SLP sequences starting from reduction chains.  */
          FOR_EACH_VEC_ELT (loop_vinfo->reduction_chains, i, first_element)
            if (vect_analyze_slp_instance (vinfo, first_element,
                                           max_tree_size))

              /* Don't try to vectorize SLP reductions if reduction chain was
                 detected.  */

      /* Find SLP sequences starting from groups of reductions.  */
      if (loop_vinfo->reductions.length () > 1
          && vect_analyze_slp_instance (vinfo, loop_vinfo->reductions[0],
                                        max_tree_size))

/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
   least one instance.  */

vect_make_slp_decision (loop_vec_info loop_vinfo)

  unsigned int i, unrolling_factor = 1;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
                     "\n");

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Decided to SLP %d instances. Unrolling factor %d\n",
                     decided_to_slp, unrolling_factor);

  return (decided_to_slp > 0);

/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

vect_detect_hybrid_slp_stmts (slp_tree node, unsigned i, slp_vect_type stype)

  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[i];
  imm_use_iterator imm_iter;
  stmt_vec_info use_vinfo, stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Propagate hybrid down the SLP tree.  */
  if (stype == hybrid)
  else if (HYBRID_SLP_STMT (stmt_vinfo))

      /* Check if a pure SLP stmt has uses in non-SLP stmts.  */
      gcc_checking_assert (PURE_SLP_STMT (stmt_vinfo));
      /* We always get the pattern stmt here, but for immediate
         uses we have to use the LHS of the original stmt.  */
      gcc_checking_assert (!STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
      if (STMT_VINFO_RELATED_STMT (stmt_vinfo))
        stmt = STMT_VINFO_RELATED_STMT (stmt_vinfo);
      if (TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
        FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
            if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
            use_vinfo = vinfo_for_stmt (use_stmt);
            if (STMT_VINFO_IN_PATTERN_P (use_vinfo)
                && STMT_VINFO_RELATED_STMT (use_vinfo))
              use_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (use_vinfo));
            if (!STMT_SLP_TYPE (use_vinfo)
                && (STMT_VINFO_RELEVANT (use_vinfo)
                    || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (use_vinfo)))
                && !(gimple_code (use_stmt) == GIMPLE_PHI
                     && STMT_VINFO_DEF_TYPE (use_vinfo) == vect_reduction_def))
                if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location, "use of SLP "
                                     "def in non-SLP stmt: ");
                    dump_gimple_stmt (MSG_NOTE, TDF_SLIM, use_stmt, 0);

  if (stype == hybrid
      && !HYBRID_SLP_STMT (stmt_vinfo))
      if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      STMT_SLP_TYPE (stmt_vinfo) = hybrid;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_external_def)
      vect_detect_hybrid_slp_stmts (child, i, stype);
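
/* An illustrative example of a hybrid stmt (names arbitrary): if an SLP
   group computes  t0 = a[0] + b[0]  and  t1 = a[1] + b[1],  but t0 also
   feeds a use  sum += t0  that is handled by loop-based vectorization
   rather than by SLP, then the stmt defining t0 is needed by both forms of
   vectorization and is marked HYBRID by the walk above.  */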

/* Helpers for vect_detect_hybrid_slp walking pattern stmt uses.  */

vect_detect_hybrid_slp_1 (tree *tp, int *, void *data)

  walk_stmt_info *wi = (walk_stmt_info *) data;
  struct loop *loopp = (struct loop *) wi->info;

  if (TREE_CODE (*tp) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (*tp))
      gimple *def_stmt = SSA_NAME_DEF_STMT (*tp);
      if (flow_bb_inside_loop_p (loopp, gimple_bb (def_stmt))
          && PURE_SLP_STMT (vinfo_for_stmt (def_stmt)))
          if (dump_enabled_p ())
              dump_printf_loc (MSG_NOTE, vect_location, "marking hybrid: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
          STMT_SLP_TYPE (vinfo_for_stmt (def_stmt)) = hybrid;

vect_detect_hybrid_slp_2 (gimple_stmt_iterator *gsi, bool *handled,

  /* If the stmt is in a SLP instance then this isn't a reason
     to mark use definitions in other SLP instances as hybrid.  */
  if (STMT_SLP_TYPE (vinfo_for_stmt (gsi_stmt (*gsi))) != loop_vect)

/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  vec<slp_instance> slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_detect_hybrid_slp ===\n");

  /* First walk all pattern stmt in the loop and mark defs of uses as
     hybrid because immediate uses in them are not recorded.  */
  for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    {
      basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
          if (STMT_VINFO_IN_PATTERN_P (stmt_info))
            {
              walk_stmt_info wi;
              memset (&wi, 0, sizeof (wi));
              wi.info = LOOP_VINFO_LOOP (loop_vinfo);
              gimple_stmt_iterator gsi2
                = gsi_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
              walk_gimple_stmt (&gsi2, vect_detect_hybrid_slp_2,
                                vect_detect_hybrid_slp_1, &wi);
              walk_gimple_seq (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info),
                               vect_detect_hybrid_slp_2,
                               vect_detect_hybrid_slp_1, &wi);
            }
        }
    }

  /* Then walk the SLP instance trees marking stmts with uses in
     non-SLP stmts as hybrid, also propagating hybrid down the
     SLP tree, collecting the above info on-the-fly.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      for (unsigned i = 0; i < SLP_INSTANCE_GROUP_SIZE (instance); ++i)
        vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance),
                                      i, pure_slp);
    }
}
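
/* Note: a "hybrid" stmt is one that is part of an SLP instance but whose
   scalar result is also needed by a statement that is not SLP-vectorized
   (or by a pattern statement whose immediate uses are not recorded).  Such
   statements have to be vectorized both as part of the SLP tree and as
   ordinary loop-based vector statements; the walks above mark them so the
   transform phase emits both forms.  */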

/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (gimple_stmt_iterator region_begin,
                 gimple_stmt_iterator region_end)
{
  basic_block bb = gsi_bb (region_begin);
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  res->kind = vec_info::bb;
  BB_VINFO_BB (res) = bb;
  res->region_begin = region_begin;
  res->region_end = region_end;

  for (gsi = region_begin; gsi_stmt (gsi) != gsi_stmt (region_end);
       gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, res));
    }

  BB_VINFO_GROUPED_STORES (res).create (10);
  BB_VINFO_SLP_INSTANCES (res).create (2);
  BB_VINFO_TARGET_COST_DATA (res) = init_cost (NULL);

  bb->aux = res;
  return res;
}

/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  basic_block bb;
  gimple_stmt_iterator si;
  unsigned i;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = bb_vinfo->region_begin;
       gsi_stmt (si) != gsi_stmt (bb_vinfo->region_end); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);

      /* Reset region marker.  */
      gimple_set_uid (stmt, -1);
    }

  vect_destroy_datarefs (bb_vinfo);
  free_dependence_relations (BB_VINFO_DDRS (bb_vinfo));
  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    vect_free_slp_instance (instance);
  BB_VINFO_SLP_INSTANCES (bb_vinfo).release ();
  destroy_cost_data (BB_VINFO_TARGET_COST_DATA (bb_vinfo));

  bb->aux = NULL;
  free (bb_vinfo);
}

/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (slp_tree node)
{
  bool dummy;
  int i, j;
  gimple *stmt;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return true;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (!vect_slp_analyze_node_operations (child))
      return false;

  bool res = true;
  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);

      /* Push SLP node def-type to stmt operands.  */
      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
        if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
          STMT_VINFO_DEF_TYPE (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (child)[i]))
            = SLP_TREE_DEF_TYPE (child);
      res = vect_analyze_stmt (stmt, &dummy, node);
      /* Restore def-types.  */
      FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), j, child)
        if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
          STMT_VINFO_DEF_TYPE (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (child)[i]))
            = vect_internal_def;
      if (! res)
        return false;
    }

  return true;
}
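
/* Note on the def-type push/restore above: vect_analyze_stmt inspects an
   operand through the def-type of its defining scalar stmt, but for an SLP
   tree an operand that is constant or external to the tree is recorded on
   the child node rather than on that stmt.  Temporarily copying the child's
   def-type onto the corresponding scalar def lets the per-stmt analysis see
   the operand as invariant; it is restored immediately so other SLP
   instances sharing the stmt are unaffected.  */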

/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

bool
vect_slp_analyze_operations (vec<slp_instance> slp_instances, void *data)
{
  slp_instance instance;
  int i;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_slp_analyze_operations ===\n");

  for (i = 0; slp_instances.iterate (i, &instance); )
    {
      if (!vect_slp_analyze_node_operations (SLP_INSTANCE_TREE (instance)))
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "removing SLP instance operations starting from: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                            SLP_TREE_SCALAR_STMTS
                              (SLP_INSTANCE_TREE (instance))[0], 0);
          vect_free_slp_instance (instance);
          slp_instances.ordered_remove (i);
        }
      else
        {
          /* Compute the costs of the SLP instance.  */
          vect_analyze_slp_cost (instance, data);
          i++;
        }
    }

  if (!slp_instances.length ())
    return false;

  return true;
}

/* Compute the scalar cost of the SLP node NODE and its children
   and return it.  Do not account defs that are marked in LIFE and
   update LIFE according to uses of NODE.  */

static unsigned
vect_bb_slp_scalar_cost (basic_block bb,
                         slp_tree node, vec<bool, va_heap> *life)
{
  unsigned scalar_cost = 0;
  unsigned i;
  gimple *stmt;
  slp_tree child;

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      unsigned stmt_cost;
      ssa_op_iter op_iter;
      def_operand_p def_p;
      stmt_vec_info stmt_info;

      if ((*life)[i])
        continue;

      /* If there is a non-vectorized use of the defs then the scalar
         stmt is kept live in which case we do not account it or any
         required defs in the SLP children in the scalar cost.  This
         way we make the vectorization more costly when compared to
         the scalar cost.  */
      FOR_EACH_SSA_DEF_OPERAND (def_p, stmt, op_iter, SSA_OP_DEF)
        {
          imm_use_iterator use_iter;
          gimple *use_stmt;
          FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, DEF_FROM_PTR (def_p))
            if (!is_gimple_debug (use_stmt)
                && (! vect_stmt_in_region_p (vinfo_for_stmt (stmt)->vinfo,
                                             use_stmt)
                    || ! PURE_SLP_STMT (vinfo_for_stmt (use_stmt))))
              {
                (*life)[i] = true;
                BREAK_FROM_IMM_USE_STMT (use_iter);
              }
        }
      if ((*life)[i])
        continue;

      stmt_info = vinfo_for_stmt (stmt);
      if (STMT_VINFO_DATA_REF (stmt_info))
        {
          if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
            stmt_cost = vect_get_stmt_cost (scalar_load);
          else
            stmt_cost = vect_get_stmt_cost (scalar_store);
        }
      else
        stmt_cost = vect_get_stmt_cost (scalar_stmt);

      scalar_cost += stmt_cost;
    }

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
      scalar_cost += vect_bb_slp_scalar_cost (bb, child, life);

  return scalar_cost;
}
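
/* LIFE has one flag per scalar stmt of the group.  Once a def of stmt I is
   found to have a use outside the vectorized region or in a non-SLP stmt,
   (*life)[I] is set and the stmt is skipped here and in the child nodes:
   its scalar form stays live no matter what, so counting it would make the
   scalar cost, and hence the case for vectorizing, look artificially
   good.  */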

/* Check if vectorization of the basic block is profitable.  */

static bool
vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
{
  vec<slp_instance> slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;
  unsigned int vec_inside_cost = 0, vec_outside_cost = 0, scalar_cost = 0;
  unsigned int vec_prologue_cost = 0, vec_epilogue_cost = 0;

  /* Calculate scalar cost.  */
  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      auto_vec<bool, 20> life;
      life.safe_grow_cleared (SLP_INSTANCE_GROUP_SIZE (instance));
      scalar_cost += vect_bb_slp_scalar_cost (BB_VINFO_BB (bb_vinfo),
                                              SLP_INSTANCE_TREE (instance),
                                              &life);
    }

  /* Complete the target-specific cost calculation.  */
  finish_cost (BB_VINFO_TARGET_COST_DATA (bb_vinfo), &vec_prologue_cost,
               &vec_inside_cost, &vec_epilogue_cost);

  vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
      dump_printf (MSG_NOTE, "  Vector inside of basic block cost: %d\n",
                   vec_inside_cost);
      dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n", vec_prologue_cost);
      dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n", vec_epilogue_cost);
      dump_printf (MSG_NOTE, "  Scalar cost of basic block: %d\n", scalar_cost);
    }

  /* Vectorization is profitable if its cost is less than the cost of the
     scalar version.  Note that we err on the vector side for equal cost
     because the cost estimate is otherwise quite pessimistic (constant uses
     are free on the scalar side but cost a load on the vector side for
     example).  */
  if (vec_outside_cost + vec_inside_cost > scalar_cost)
    return false;

  return true;
}
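
/* For instance, with four scalar stores (scalar_cost == 4), one vector store
   (vec_inside_cost == 1) and two prologue stmts building the stored vector
   (vec_prologue_cost == 2), we get 1 + 2 <= 4 and the block is vectorized.
   On a tie the vector side wins, for the reason given above.  */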

/* Check if the basic block can be vectorized.  Returns a bb_vec_info
   if so and sets fatal to true if failure is independent of
   current_vector_size.  */

static bb_vec_info
vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
                       gimple_stmt_iterator region_end,
                       vec<data_reference_p> datarefs, int n_stmts,
                       bool &fatal)
{
  bb_vec_info bb_vinfo;
  slp_instance instance;
  int i;
  int min_vf = 2;

  /* The first group of checks is independent of the vector size.  */
  fatal = true;

  if (n_stmts > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: too many instructions in "
                         "basic block.\n");
      free_data_refs (datarefs);
      return NULL;
    }

  bb_vinfo = new_bb_vec_info (region_begin, region_end);
  if (!bb_vinfo)
    return NULL;

  BB_VINFO_DATAREFS (bb_vinfo) = datarefs;

  /* Analyze the data references.  */

  if (!vect_analyze_data_refs (bb_vinfo, &min_vf))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data-ref in basic "
                         "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (BB_VINFO_DATAREFS (bb_vinfo).length () < 2)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: not enough data-refs in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_accesses (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unhandled data access in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* If there are no grouped stores in the region there is no need
     to continue with pattern recog as vect_analyze_slp will fail
     anyway.  */
  if (bb_vinfo->grouped_stores.is_empty ())
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: no grouped stores in "
                         "basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* While the rest of the analysis below depends on it in some way.  */
  fatal = false;

  vect_pattern_recog (bb_vinfo);

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (bb_vinfo, n_stmts))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "Failed to SLP the basic block.\n");
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: failed to find SLP opportunities "
                           "in basic block.\n");
        }

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Analyze and verify the alignment of data references and the
     dependence in the SLP instances.  */
  for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); )
    {
      if (! vect_slp_analyze_and_verify_instance_alignment (instance)
          || ! vect_slp_analyze_instance_dependence (instance))
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "removing SLP instance operations starting from: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
                            SLP_TREE_SCALAR_STMTS
                              (SLP_INSTANCE_TREE (instance))[0], 0);
          vect_free_slp_instance (instance);
          BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
          continue;
        }

      /* Mark all the statements that we want to vectorize as pure SLP and
         relevant.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));

      i++;
    }
  if (! BB_VINFO_SLP_INSTANCES (bb_vinfo).length ())
    {
      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_slp_analyze_operations (BB_VINFO_SLP_INSTANCES (bb_vinfo),
                                    BB_VINFO_TARGET_COST_DATA (bb_vinfo)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: bad operation in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Cost model: check if the vectorization is worthwhile.  */
  if (!unlimited_cost_model (NULL)
      && !vect_bb_vectorization_profitable_p (bb_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: vectorization is not "
                         "profitable.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "Basic block will be vectorized using SLP\n");

  return bb_vinfo;
}

/* Main entry for the BB vectorizer.  Analyze and transform BB, returns
   true if anything in the basic-block was vectorized.  */

bool
vect_slp_bb (basic_block bb)
{
  bb_vec_info bb_vinfo;
  gimple_stmt_iterator gsi;
  unsigned int vector_sizes;
  bool any_vectorized = false;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "===vect_slp_analyze_bb===\n");

  /* Autodetect first vector size we try.  */
  current_vector_size = 0;
  vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();

  gsi = gsi_start_bb (bb);

  while (1)
    {
      if (gsi_end_p (gsi))
        break;

      gimple_stmt_iterator region_begin = gsi;
      vec<data_reference_p> datarefs = vNULL;
      int insns = 0;

      for (; !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);
          if (is_gimple_debug (stmt))
            continue;
          insns++;

          if (gimple_location (stmt) != UNKNOWN_LOCATION)
            vect_location = gimple_location (stmt);

          if (!find_data_references_in_stmt (NULL, stmt, &datarefs))
            break;
        }

      /* Skip leading unhandled stmts.  */
      if (gsi_stmt (region_begin) == gsi_stmt (gsi))
        {
          gsi_next (&gsi);
          continue;
        }

      gimple_stmt_iterator region_end = gsi;

      bool vectorized = false;
      bool fatal = false;
      bb_vinfo = vect_slp_analyze_bb_1 (region_begin, region_end,
                                        datarefs, insns, fatal);
      if (bb_vinfo
          && dbg_cnt (vect_slp))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "SLPing BB part\n");

          vect_schedule_slp (bb_vinfo);

          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "basic block part vectorized\n");

          destroy_bb_vec_info (bb_vinfo);

          vectorized = true;
        }
      else
        destroy_bb_vec_info (bb_vinfo);

      any_vectorized |= vectorized;

      vector_sizes &= ~current_vector_size;
      if (vectorized
          || vector_sizes == 0
          || current_vector_size == 0
          /* If vect_slp_analyze_bb_1 signaled that analysis for all
             vector sizes will fail do not bother iterating.  */
          || fatal)
        {
          if (gsi_end_p (region_end))
            break;

          /* Skip the unhandled stmt.  */
          gsi_next (&gsi);

          /* And reset vector sizes.  */
          current_vector_size = 0;
          vector_sizes = targetm.vectorize.autovectorize_vector_sizes ();
        }
      else
        {
          /* Try the next biggest vector size.  */
          current_vector_size = 1 << floor_log2 (vector_sizes);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "***** Re-trying analysis with "
                             "vector size %d\n", current_vector_size);

          /* Start over with the same region.  */
          gsi = region_begin;
        }
    }

  return any_vectorized;
}
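
/* The retry loop above works on a bitmask of candidate vector sizes as
   returned by targetm.vectorize.autovectorize_vector_sizes (e.g. 32|16|8
   bytes on a target supporting three sizes).  After a failed attempt the
   current size is cleared from the mask and the largest remaining power of
   two (1 << floor_log2 (vector_sizes)) is tried on the same region; once a
   region is vectorized, or the mask is exhausted, or failure is known to be
   size-independent, the sizes are reset for the next region.  */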

/* Return true if the vector type that will be used for the boolean constant
   that is operand OPNUM of statement STMT is a boolean vector.  */

static bool
vect_mask_constant_operand_p (gimple *stmt, int opnum)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  enum tree_code code = gimple_expr_code (stmt);
  tree op, vectype;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* For comparison and COND_EXPR type is chosen depending
     on the other comparison operand.  */
  if (TREE_CODE_CLASS (code) == tcc_comparison)
    {
      if (opnum)
        op = gimple_assign_rhs1 (stmt);
      else
        op = gimple_assign_rhs2 (stmt);

      if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
                               &dt, &vectype))
        gcc_unreachable ();

      return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
    }

  if (code == COND_EXPR)
    {
      tree cond = gimple_assign_rhs1 (stmt);

      if (TREE_CODE (cond) == SSA_NAME)
        return false;

      if (opnum)
        op = TREE_OPERAND (cond, 1);
      else
        op = TREE_OPERAND (cond, 0);

      if (!vect_is_simple_use (op, stmt_vinfo->vinfo, &def_stmt,
                               &dt, &vectype))
        gcc_unreachable ();

      return !vectype || VECTOR_BOOLEAN_TYPE_P (vectype);
    }

  return VECTOR_BOOLEAN_TYPE_P (STMT_VINFO_VECTYPE (stmt_vinfo));
}

/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the RHS of
   scalar stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

static void
vect_get_constant_vectors (tree op, slp_tree slp_node,
                           vec<tree> *vec_oprnds,
                           unsigned int op_num, unsigned int number_of_vectors,
                           int reduc_index)
{
  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple *stmt = stmts[0];
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned nunits;
  tree vec_cst;
  tree *elts;
  unsigned j, number_of_places_left_in_vector;
  tree vector_type;
  tree vop;
  int group_size = stmts.length ();
  unsigned int vec_num, i;
  unsigned number_of_copies = 1;
  vec<tree> voprnds;
  voprnds.create (number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;
  enum tree_code code = gimple_expr_code (stmt);
  gimple *def_stmt;
  struct loop *loop;
  gimple_seq ctor_seq = NULL;

  /* Check if vector type is a boolean vector.  */
  if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
      && vect_mask_constant_operand_p (stmt, op_num))
    vector_type
      = build_same_sized_truth_vector_type (STMT_VINFO_VECTYPE (stmt_vinfo));
  else
    vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && reduc_index != -1)
    {
      op_num = reduc_index;
      op = gimple_op (stmt, op_num + 1);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
         we need either neutral operands or the original operands.  See
         get_initial_def_for_reduction() for details.  */
      switch (code)
        {
          case WIDEN_SUM_EXPR:
          case DOT_PROD_EXPR:
          case SAD_EXPR:
          case PLUS_EXPR:
          case MINUS_EXPR:
          case BIT_IOR_EXPR:
          case BIT_XOR_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst0);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 0);
            break;

          case MULT_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst1);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 1);
            break;

          case BIT_AND_EXPR:
            neutral_op = build_int_cst (TREE_TYPE (op), -1);
            break;

          /* For MIN/MAX we don't have an easy neutral operand but
             the initial values can be used fine here.  Only for
             a reduction chain we have to force a neutral element.  */
          case MAX_EXPR:
          case MIN_EXPR:
            if (!GROUP_FIRST_ELEMENT (stmt_vinfo))
              neutral_op = NULL;
            else
              {
                def_stmt = SSA_NAME_DEF_STMT (op);
                loop = (gimple_bb (stmt))->loop_father;
                neutral_op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                                    loop_preheader_edge (loop));
              }
            break;

          default:
            gcc_assert (!GROUP_FIRST_ELEMENT (stmt_vinfo));
            neutral_op = NULL;
        }
    }

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      is_store = true;
      op = gimple_assign_rhs1 (stmt);
    }
  else
    is_store = false;

  gcc_assert (op);

  if (CONSTANT_CLASS_P (op))
    constant_p = true;
  else
    constant_p = false;

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = nunits * number_of_vectors / group_size;

  number_of_places_left_in_vector = nunits;
  elts = XALLOCAVEC (tree, nunits);
  bool place_after_defs = false;
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
        {
          if (is_store)
            op = gimple_assign_rhs1 (stmt);
          else
            {
              switch (code)
                {
                  case COND_EXPR:
                    {
                      tree cond = gimple_assign_rhs1 (stmt);
                      if (TREE_CODE (cond) == SSA_NAME)
                        op = gimple_op (stmt, op_num + 1);
                      else if (op_num == 0 || op_num == 1)
                        op = TREE_OPERAND (cond, op_num);
                      else
                        {
                          if (op_num == 2)
                            op = gimple_assign_rhs2 (stmt);
                          else
                            op = gimple_assign_rhs3 (stmt);
                        }
                    }
                    break;

                  case CALL_EXPR:
                    op = gimple_call_arg (stmt, op_num);
                    break;

                  case LSHIFT_EXPR:
                  case RSHIFT_EXPR:
                  case LROTATE_EXPR:
                  case RROTATE_EXPR:
                    op = gimple_op (stmt, op_num + 1);
                    /* Unlike the other binary operators, shifts/rotates have
                       the shift count being int, instead of the same type as
                       the lhs, so make sure the scalar is the right type if
                       we are dealing with vectors of
                       long long/long/short/char.  */
                    if (op_num == 1 && TREE_CODE (op) == INTEGER_CST)
                      op = fold_convert (TREE_TYPE (vector_type), op);
                    break;

                  default:
                    op = gimple_op (stmt, op_num + 1);
                    break;
                }
            }

          if (reduc_index != -1)
            {
              loop = (gimple_bb (stmt))->loop_father;
              def_stmt = SSA_NAME_DEF_STMT (op);

              gcc_assert (loop);

              /* Get the def before the loop.  In reduction chain we have only
                 one initial value.  */
              if ((j != (number_of_copies - 1)
                   || (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
                       && i != 0))
                  && neutral_op)
                op = neutral_op;
              else
                op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                            loop_preheader_edge (loop));
            }

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          number_of_places_left_in_vector--;
          tree orig_op = op;
          if (!types_compatible_p (TREE_TYPE (vector_type), TREE_TYPE (op)))
            {
              if (CONSTANT_CLASS_P (op))
                {
                  if (VECTOR_BOOLEAN_TYPE_P (vector_type))
                    {
                      /* Can't use VIEW_CONVERT_EXPR for booleans because
                         of possibly different sizes of scalar value and
                         vector element.  */
                      if (integer_zerop (op))
                        op = build_int_cst (TREE_TYPE (vector_type), 0);
                      else if (integer_onep (op))
                        op = build_int_cst (TREE_TYPE (vector_type), 1);
                      else
                        gcc_unreachable ();
                    }
                  else
                    op = fold_unary (VIEW_CONVERT_EXPR,
                                     TREE_TYPE (vector_type), op);
                  gcc_assert (op && CONSTANT_CLASS_P (op));
                }
              else
                {
                  tree new_temp = make_ssa_name (TREE_TYPE (vector_type));
                  gimple *init_stmt;
                  op = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (vector_type), op);
                  init_stmt
                    = gimple_build_assign (new_temp, VIEW_CONVERT_EXPR, op);
                  gimple_seq_add_stmt (&ctor_seq, init_stmt);
                  op = new_temp;
                }
            }
          elts[number_of_places_left_in_vector] = op;
          if (!CONSTANT_CLASS_P (op))
            constant_p = false;
          if (TREE_CODE (orig_op) == SSA_NAME
              && !SSA_NAME_IS_DEFAULT_DEF (orig_op)
              && STMT_VINFO_BB_VINFO (stmt_vinfo)
              && (STMT_VINFO_BB_VINFO (stmt_vinfo)->bb
                  == gimple_bb (SSA_NAME_DEF_STMT (orig_op))))
            place_after_defs = true;

          if (number_of_places_left_in_vector == 0)
            {
              number_of_places_left_in_vector = nunits;

              if (constant_p)
                vec_cst = build_vector (vector_type, elts);
              else
                {
                  vec<constructor_elt, va_gc> *v;
                  unsigned k;
                  vec_alloc (v, nunits);
                  for (k = 0; k < nunits; ++k)
                    CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, elts[k]);
                  vec_cst = build_constructor (vector_type, v);
                }
              tree init;
              gimple_stmt_iterator gsi;
              if (place_after_defs)
                {
                  gsi = gsi_for_stmt
                          (vect_find_last_scalar_stmt_in_slp (slp_node));
                  init = vect_init_vector (stmt, vec_cst, vector_type, &gsi);
                }
              else
                init = vect_init_vector (stmt, vec_cst, vector_type, NULL);
              if (ctor_seq != NULL)
                {
                  gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (init));
                  gsi_insert_seq_before_without_update (&gsi, ctor_seq,
                                                        GSI_SAME_STMT);
                  ctor_seq = NULL;
                }
              voprnds.quick_push (init);
              place_after_defs = false;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = voprnds.length ();
  for (j = vec_num; j != 0; j--)
    {
      vop = voprnds[j - 1];
      vec_oprnds->quick_push (vop);
    }

  voprnds.release ();

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  while (number_of_vectors > vec_oprnds->length ())
    {
      tree neutral_vec = NULL;

      if (neutral_op)
        {
          if (!neutral_vec)
            neutral_vec = build_vector_from_val (vector_type, neutral_op);

          vec_oprnds->quick_push (neutral_vec);
        }
      else
        {
          for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
            vec_oprnds->quick_push (vop);
        }
    }
}
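
/* Worked example: with a group of two scalar operands {s1, s2}, NUNITS == 4
   and NUMBER_OF_VECTORS == 1, NUMBER_OF_COPIES is 4 * 1 / 2 == 2 and the
   single constant vector built is {s1, s2, s1, s2}.  The element array is
   filled from the highest index downwards while the group is walked
   backwards, and completed vectors are therefore pushed onto VOPRNDS in
   reverse order, which is why they are inverted before being handed back in
   VEC_OPRNDS.  */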

/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, vec<tree> *vec_oprnds)
{
  tree vec_oprnd;
  gimple *vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node).exists ());

  FOR_EACH_VEC_ELT (SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      vec_oprnds->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the corresponding child of SLP_NODE, and we call
   vect_get_slp_vect_defs () to retrieve them.  */

void
vect_get_slp_defs (vec<tree> ops, slp_tree slp_node,
                   vec<vec<tree> > *vec_oprnds, int reduc_index)
{
  gimple *first_stmt;
  int number_of_vects = 0, i;
  unsigned int child_index = 0;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;
  slp_tree child = NULL;
  vec<tree> vec_defs;
  tree oprnd;
  bool vectorized_defs;

  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
  FOR_EACH_VEC_ELT (ops, i, oprnd)
    {
      /* For each operand we check if it has vectorized definitions in a child
         node or we need to create them (for invariants and constants).  We
         check if the LHS of the first stmt of the next child matches OPRND.
         If it does, we found the correct child.  Otherwise, we call
         vect_get_constant_vectors (), and not advance CHILD_INDEX in order
         to check this child node for the next operand.  */
      vectorized_defs = false;
      if (SLP_TREE_CHILDREN (slp_node).length () > child_index)
        {
          child = SLP_TREE_CHILDREN (slp_node)[child_index];

          /* We have to check both pattern and original def, if available.  */
          if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
            {
              gimple *first_def = SLP_TREE_SCALAR_STMTS (child)[0];
              gimple *related
                = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first_def));

              if (operand_equal_p (oprnd, gimple_get_lhs (first_def), 0)
                  || (related
                      && operand_equal_p (oprnd, gimple_get_lhs (related), 0)))
                {
                  /* The number of vector defs is determined by the number of
                     vector statements in the node from which we get those
                     statements.  */
                  number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (child);
                  vectorized_defs = true;
                  child_index++;
                }
            }
          else
            child_index++;
        }

      if (!vectorized_defs)
        {
          if (i == 0)
            {
              number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
              /* Number of vector stmts was calculated according to LHS in
                 vect_schedule_slp_instance (), fix it by replacing LHS with
                 RHS, if necessary.  See vect_get_smallest_scalar_type () for
                 details.  */
              vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
                                             &rhs_size_unit);
              if (rhs_size_unit != lhs_size_unit)
                {
                  number_of_vects *= rhs_size_unit;
                  number_of_vects /= lhs_size_unit;
                }
            }
        }

      /* Allocate memory for vectorized defs.  */
      vec_defs = vNULL;
      vec_defs.create (number_of_vects);

      /* For reduction defs we call vect_get_constant_vectors (), since we are
         looking for initial loop invariant values.  */
      if (vectorized_defs && reduc_index == -1)
        /* The defs are already vectorized.  */
        vect_get_slp_vect_defs (child, &vec_defs);
      else
        /* Build vectors from scalar defs.  */
        vect_get_constant_vectors (oprnd, slp_node, &vec_defs, i,
                                   number_of_vects, reduc_index);

      vec_oprnds->quick_push (vec_defs);

      /* For reductions, we only need initial values.  */
      if (reduc_index != -1)
        return;
    }
}

/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
   building a vector of type MASK_TYPE from it) and two input vectors placed in
   DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
   shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
   the created stmts must be inserted.  */

static void
vect_create_mask_and_perm (gimple *stmt,
                           tree mask, int first_vec_indx, int second_vec_indx,
                           gimple_stmt_iterator *gsi, slp_tree node,
                           tree vectype, vec<tree> dr_chain,
                           int ncopies, int vect_stmts_counter)
{
  tree perm_dest;
  gimple *perm_stmt = NULL;
  int i, stride_in, stride_out;
  tree first_vec, second_vec, data_ref;

  stride_out = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
  stride_in = dr_chain.length () / ncopies;

  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
  for (i = SLP_TREE_VEC_STMTS (node).length ();
       i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
    SLP_TREE_VEC_STMTS (node).quick_push (NULL);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
  for (i = 0; i < ncopies; i++)
    {
      first_vec = dr_chain[first_vec_indx];
      second_vec = dr_chain[second_vec_indx];

      /* Generate the permute statement if necessary.  */
      if (mask)
        {
          perm_stmt = gimple_build_assign (perm_dest, VEC_PERM_EXPR,
                                           first_vec, second_vec, mask);
          data_ref = make_ssa_name (perm_dest, perm_stmt);
          gimple_set_lhs (perm_stmt, data_ref);
          vect_finish_stmt_generation (stmt, perm_stmt, gsi);
        }
      else
        /* If mask was NULL_TREE generate the requested identity transform.  */
        perm_stmt = SSA_NAME_DEF_STMT (first_vec);

      /* Store the vector statement in NODE.  */
      SLP_TREE_VEC_STMTS (node)[stride_out * i + vect_stmts_counter]
        = perm_stmt;

      first_vec_indx += stride_in;
      second_vec_indx += stride_in;
    }
}
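
/* Example of the striding: if NODE needs four vector stmts and NCOPIES is 2,
   STRIDE_OUT is 2, so copy I of this permutation is stored in slot
   2 * I + VECT_STMTS_COUNTER of SLP_TREE_VEC_STMTS (NODE), while the input
   vectors for each successive copy advance by STRIDE_IN elements of
   DR_CHAIN.  */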

/* Generate vector permute statements from a list of loads in DR_CHAIN.
   If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
   permute statements for the SLP node NODE of the SLP instance
   SLP_NODE_INSTANCE.  */

bool
vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
                              gimple_stmt_iterator *gsi, int vf,
                              slp_instance slp_node_instance, bool analyze_only)
{
  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_element_type = NULL_TREE, mask_type;
  int nunits, vec_index = 0;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
  int unroll_factor, mask_element, ncopies;
  unsigned char *mask;
  machine_mode mode;

  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));

  mode = TYPE_MODE (vectype);

  /* The generic VEC_PERM_EXPR code always uses an integral type of the
     same size as the vector element being permuted.  */
  mask_element_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  mask = XALLOCAVEC (unsigned char, nunits);
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* Number of copies is determined by the final vectorization factor
     relatively to SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);

  /* Generate permutation masks for every NODE.  Number of masks for each NODE
     is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4.  I.e., we have an
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     ...

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9}.
     The last mask is illegal since we assume two operands for permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */

  int vect_stmts_counter = 0;
  int index = 0;
  int first_vec_index = -1;
  int second_vec_index = -1;
  bool noop_p = true;

  for (int j = 0; j < unroll_factor; j++)
    {
      for (int k = 0; k < group_size; k++)
        {
          int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
                   + j * STMT_VINFO_GROUP_SIZE (stmt_info));
          vec_index = i / nunits;
          mask_element = i % nunits;
          if (vec_index == first_vec_index
              || first_vec_index == -1)
            {
              first_vec_index = vec_index;
            }
          else if (vec_index == second_vec_index
                   || second_vec_index == -1)
            {
              second_vec_index = vec_index;
              mask_element += nunits;
            }
          else
            {
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "permutation requires at "
                                   "least three vectors ");
                  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                    stmt, 0);
                  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
                }
              return false;
            }

          gcc_assert (mask_element >= 0
                      && mask_element < 2 * nunits);
          if (mask_element != index)
            noop_p = false;
          mask[index++] = mask_element;

          if (index == nunits)
            {
              if (! noop_p
                  && ! can_vec_perm_p (mode, false, mask))
                {
                  if (dump_enabled_p ())
                    {
                      dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                       vect_location,
                                       "unsupported vect permute { ");
                      for (i = 0; i < nunits; ++i)
                        dump_printf (MSG_MISSED_OPTIMIZATION, "%d ", mask[i]);
                      dump_printf (MSG_MISSED_OPTIMIZATION, "}\n");
                    }
                  return false;
                }

              if (!analyze_only)
                {
                  tree mask_vec = NULL_TREE;

                  if (! noop_p)
                    {
                      tree *mask_elts = XALLOCAVEC (tree, nunits);
                      for (int l = 0; l < nunits; ++l)
                        mask_elts[l] = build_int_cst (mask_element_type,
                                                      mask[l]);
                      mask_vec = build_vector (mask_type, mask_elts);
                    }

                  if (second_vec_index == -1)
                    second_vec_index = first_vec_index;
                  vect_create_mask_and_perm (stmt, mask_vec, first_vec_index,
                                             second_vec_index,
                                             gsi, node, vectype, dr_chain,
                                             ncopies, vect_stmts_counter++);
                }

              index = 0;
              noop_p = true;
              first_vec_index = -1;
              second_vec_index = -1;
            }
        }
    }

  return true;
}
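
/* Note that a VEC_PERM_EXPR takes at most two input vectors, so every mask
   element must select from FIRST_VEC_INDEX or SECOND_VEC_INDEX (the latter
   encoded by adding NUNITS).  A group whose lanes would have to come from
   three different load vectors is rejected above with "permutation requires
   at least three vectors", and a mask that turns out to be the identity
   (NOOP_P) emits no permute at all and simply reuses the existing load.  */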

/* Vectorize SLP instance tree in postorder.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
                            unsigned int vectorization_factor)
{
  gimple *stmt;
  bool grouped_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i, j;
  slp_tree child;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return false;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_schedule_slp_instance (child, instance, vectorization_factor);

  /* Push SLP node def-type to stmts.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = SLP_TREE_DEF_TYPE (child);

  stmt = SLP_TREE_SCALAR_STMTS (node)[0];
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  Number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by vector
     size.
     Unless this is a SLP reduction in which case the number of vector
     stmts is equal to the number of vector stmts of the children.  */
  if (GROUP_FIRST_ELEMENT (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    vec_stmts_size = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
  else
    vec_stmts_size = (vectorization_factor * group_size) / nunits;

  if (!SLP_TREE_VEC_STMTS (node).exists ())
    {
      SLP_TREE_VEC_STMTS (node).create (vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "------>vectorizing SLP node starting from: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Vectorized stmts go before the last scalar stmt which is where
     all uses are ready.  */
  si = gsi_for_stmt (vect_find_last_scalar_stmt_in_slp (node));

  /* Mark the first element of the reduction chain as reduction to properly
     transform the node.  In the analysis phase only the last element of the
     chain is marked as reduction.  */
  if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
      && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
    {
      STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
      STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
    }

  /* Handle two-operation SLP nodes by vectorizing the group with
     both operations and then performing a merge.  */
  if (SLP_TREE_TWO_OPERATORS (node))
    {
      enum tree_code code0 = gimple_assign_rhs_code (stmt);
      enum tree_code ocode;
      gimple *ostmt;
      unsigned char *mask = XALLOCAVEC (unsigned char, group_size);
      bool allsame = true;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, ostmt)
        if (gimple_assign_rhs_code (ostmt) != code0)
          {
            mask[i] = 1;
            allsame = false;
            ocode = gimple_assign_rhs_code (ostmt);
          }
        else
          mask[i] = 0;
      if (!allsame)
        {
          vec<gimple *> v0;
          vec<gimple *> v1;
          unsigned j;
          tree tmask = NULL_TREE;
          vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
          v0 = SLP_TREE_VEC_STMTS (node).copy ();
          SLP_TREE_VEC_STMTS (node).truncate (0);
          gimple_assign_set_rhs_code (stmt, ocode);
          vect_transform_stmt (stmt, &si, &grouped_store, node, instance);
          gimple_assign_set_rhs_code (stmt, code0);
          v1 = SLP_TREE_VEC_STMTS (node).copy ();
          SLP_TREE_VEC_STMTS (node).truncate (0);
          tree meltype = build_nonstandard_integer_type
              (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype))), 1);
          tree mvectype = get_same_sized_vectype (meltype, vectype);
          unsigned k = 0, l;
          for (j = 0; j < v0.length (); ++j)
            {
              tree *melts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (vectype));
              for (l = 0; l < TYPE_VECTOR_SUBPARTS (vectype); ++l)
                {
                  if (k >= group_size)
                    k = 0;
                  melts[l] = build_int_cst
                      (meltype, mask[k++] * TYPE_VECTOR_SUBPARTS (vectype) + l);
                }
              tmask = build_vector (mvectype, melts);

              /* ???  Not all targets support a VEC_PERM_EXPR with a
                 constant mask that would translate to a vec_merge RTX
                 (with their vec_perm_const_ok).  We can either not
                 vectorize in that case or let veclower do its job.
                 Unfortunately that isn't too great and at least for
                 plus/minus we'd eventually like to match targets
                 vector addsub instructions.  */
              gimple *vstmt;
              vstmt = gimple_build_assign (make_ssa_name (vectype),
                                           VEC_PERM_EXPR,
                                           gimple_assign_lhs (v0[j]),
                                           gimple_assign_lhs (v1[j]), tmask);
              vect_finish_stmt_generation (stmt, vstmt, &si);
              SLP_TREE_VEC_STMTS (node).quick_push (vstmt);
            }
          v0.release ();
          v1.release ();
          return false;
        }
    }
  is_store = vect_transform_stmt (stmt, &si, &grouped_store, node, instance);

  /* Restore stmt def-types.  */
  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    if (SLP_TREE_DEF_TYPE (child) != vect_internal_def)
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (child), j, stmt)
        STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_internal_def;

  return is_store;
}

/* Replace scalar calls from SLP node NODE with setting of their lhs to zero.
   For loop vectorization this is done in vectorizable_call, but for SLP
   it needs to be deferred until end of vect_schedule_slp, because multiple
   SLP instances may refer to the same scalar stmt.  */

static void
vect_remove_slp_scalar_calls (slp_tree node)
{
  gimple *stmt, *new_stmt;
  gimple_stmt_iterator gsi;
  int i;
  slp_tree child;
  tree lhs;
  stmt_vec_info stmt_info;

  if (SLP_TREE_DEF_TYPE (node) != vect_internal_def)
    return;

  FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
    vect_remove_slp_scalar_calls (child);

  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
    {
      if (!is_gimple_call (stmt) || gimple_bb (stmt) == NULL)
        continue;
      stmt_info = vinfo_for_stmt (stmt);
      if (stmt_info == NULL
          || is_pattern_stmt_p (stmt_info)
          || !PURE_SLP_STMT (stmt_info))
        continue;
      lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi = gsi_for_stmt (stmt);
      gsi_replace (&gsi, new_stmt, false);
      SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;
    }
}

/* Generate vector code for all SLP instances in the loop/basic block.  */

bool
vect_schedule_slp (vec_info *vinfo)
{
  vec<slp_instance> slp_instances;
  slp_instance instance;
  unsigned int i, vf;
  bool is_store = false;

  slp_instances = vinfo->slp_instances;
  if (is_a <loop_vec_info> (vinfo))
    vf = as_a <loop_vec_info> (vinfo)->vectorization_factor;
  else
    vf = 1;

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
                                             instance, vf);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vectorizing stmts using SLP.\n");
    }

  FOR_EACH_VEC_ELT (slp_instances, i, instance)
    {
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple *store;
      unsigned int j;
      gimple_stmt_iterator gsi;

      /* Remove scalar call stmts.  Do not do this for basic-block
         vectorization as not all uses may be vectorized.
         ???  Why should this be necessary?  DCE should be able to
         remove the stmts itself.
         ???  For BB vectorization we can as well remove scalar
         stmts starting from the SLP tree root if they have no
         uses.  */
      if (is_a <loop_vec_info> (vinfo))
        vect_remove_slp_scalar_calls (root);

      for (j = 0; SLP_TREE_SCALAR_STMTS (root).iterate (j, &store)
                  && j < SLP_INSTANCE_GROUP_SIZE (instance); j++)
        {
          if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
            break;

          if (is_pattern_stmt_p (vinfo_for_stmt (store)))
            store = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (store));
          /* Free the attached stmt_vec_info and remove the stmt.  */
          gsi = gsi_for_stmt (store);
          unlink_stmt_vdef (store);
          gsi_remove (&gsi, true);
          release_defs (store);
          free_stmt_vec_info (store);