/* SLP - Basic Block Vectorization
   Copyright (C) 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfglayout.h"
#include "tree-vectorizer.h"

/* Extract the location of the basic block in the source code.
   Return the basic block location if it succeeds and UNKNOWN_LOC if not.  */

LOC
find_bb_location (basic_block bb)
{
  gimple stmt = NULL;
  gimple_stmt_iterator si;

  if (!bb)
    return UNKNOWN_LOC;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}

/* Recursively free the memory allocated for the SLP tree rooted at NODE.  */

static void
vect_free_slp_tree (slp_tree node)
{
  if (!node)
    return;

  if (SLP_TREE_LEFT (node))
    vect_free_slp_tree (SLP_TREE_LEFT (node));

  if (SLP_TREE_RIGHT (node))
    vect_free_slp_tree (SLP_TREE_RIGHT (node));

  VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node));

  if (SLP_TREE_VEC_STMTS (node))
    VEC_free (gimple, heap, SLP_TREE_VEC_STMTS (node));

  free (node);
}

/* Free the memory allocated for the SLP instance.  */

void
vect_free_slp_instance (slp_instance instance)
{
  vect_free_slp_tree (SLP_INSTANCE_TREE (instance));
  VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (instance));
  VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (instance));
}

/* Get the defs for the rhs of STMT (collect them in DEF_STMTS0/1), check that
   they are of a legal type and that they match the defs of the first stmt of
   the SLP group (stored in FIRST_STMT_...).  */
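/* For example (illustrative, not taken from a real compilation): for the
   group { a[0] = b[0] + x; a[1] = b[1] + y; } the operand-0 defs of both
   stmts come from the same interleaved load of B and match; if X is a
   constant while Y is an SSA name, the def types differ and the group is
   rejected below with "different types".  */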

static bool
vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                             slp_tree slp_node, gimple stmt,
                             VEC (gimple, heap) **def_stmts0,
                             VEC (gimple, heap) **def_stmts1,
                             enum vect_def_type *first_stmt_dt0,
                             enum vect_def_type *first_stmt_dt1,
                             tree *first_stmt_def0_type,
                             tree *first_stmt_def1_type,
                             tree *first_stmt_const_oprnd,
                             int ncopies_for_cost,
                             bool *pattern0, bool *pattern1)
{
  tree oprnd;
  unsigned int i, number_of_oprnds;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info stmt_info =
    vinfo_for_stmt (VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0));
  enum gimple_rhs_class rhs_class;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  rhs_class = get_gimple_rhs_class (gimple_assign_rhs_code (stmt));
  number_of_oprnds = gimple_num_ops (stmt) - 1;  /* RHS only */
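
  /* E.g. (illustrative): for a binary assignment 'a = b + c',
     gimple_num_ops () is 3 (the lhs plus two rhs operands), so the loop
     below checks two operands.  */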

  for (i = 0; i < number_of_oprnds; i++)
    {
      oprnd = gimple_op (stmt, i + 1);

      if (!vect_is_simple_use (oprnd, loop_vinfo, bb_vinfo, &def_stmt, &def,
                               &dt[i])
          || (!def_stmt && dt[i] != vect_constant_def))
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump, "Build SLP failed: can't find def for ");
              print_generic_expr (vect_dump, oprnd, TDF_SLIM);
            }

          return false;
        }

      /* Check if DEF_STMT is a part of a pattern in LOOP and get the def stmt
         from the pattern.  Check that all the stmts of the node are in the
         same pattern.  */
      if (loop && def_stmt && gimple_bb (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
          && vinfo_for_stmt (def_stmt)
          && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (def_stmt)))
        {
          if (!*first_stmt_dt0)
            *pattern0 = true;
          else
            {
              if (i == 1 && !*first_stmt_dt1)
                *pattern1 = true;
              else if ((i == 0 && !*pattern0) || (i == 1 && !*pattern1))
                {
                  if (vect_print_dump_info (REPORT_DETAILS))
                    {
                      fprintf (vect_dump, "Build SLP failed: some of the stmts"
                               " are in a pattern, and others are not ");
                      print_generic_expr (vect_dump, oprnd, TDF_SLIM);
                    }

                  return false;
                }
            }

          def_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
          dt[i] = STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt));

          if (*dt == vect_unknown_def_type)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "Unsupported pattern.");
              return false;
            }

          switch (gimple_code (def_stmt))
            {
              case GIMPLE_PHI:
                def = gimple_phi_result (def_stmt);
                break;

              case GIMPLE_ASSIGN:
                def = gimple_assign_lhs (def_stmt);
                break;

              default:
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported defining stmt: ");
                return false;
            }
        }

      if (!*first_stmt_dt0)
        {
          /* op0 of the first stmt of the group - store its info.  */
          *first_stmt_dt0 = dt[i];
          if (def)
            *first_stmt_def0_type = TREE_TYPE (def);
          else
            *first_stmt_const_oprnd = oprnd;

          /* Analyze costs (for the first stmt of the group only).  */
          if (rhs_class != GIMPLE_SINGLE_RHS)
            /* Not a memory operation (we don't call this function for
               loads).  */
            vect_model_simple_cost (stmt_info, ncopies_for_cost, dt, slp_node);
          else
            /* Store.  */
            vect_model_store_cost (stmt_info, ncopies_for_cost, dt[0],
                                   slp_node);
        }
      else
        {
          if (!*first_stmt_dt1 && i == 1)
            {
              /* op1 of the first stmt of the group - store its info.  */
              *first_stmt_dt1 = dt[i];
              if (def)
                *first_stmt_def1_type = TREE_TYPE (def);
              else
                {
                  /* We assume that the stmt contains only one constant
                     operand.  We fail otherwise, to be on the safe side.  */
                  if (*first_stmt_const_oprnd)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        fprintf (vect_dump, "Build SLP failed: two constant "
                                 "oprnds in stmt");
                      return false;
                    }

                  *first_stmt_const_oprnd = oprnd;
                }
            }
          else
            {
              /* Not first stmt of the group, check that the def-stmt/s match
                 the def-stmt/s of the first stmt.  */
              if ((i == 0
                   && (*first_stmt_dt0 != dt[i]
                       || (*first_stmt_def0_type && def
                           && !types_compatible_p (*first_stmt_def0_type,
                                                   TREE_TYPE (def)))))
                  || (i == 1
                      && (*first_stmt_dt1 != dt[i]
                          || (*first_stmt_def1_type && def
                              && !types_compatible_p (*first_stmt_def1_type,
                                                      TREE_TYPE (def)))))
                  || (!def
                      && !types_compatible_p (TREE_TYPE (*first_stmt_const_oprnd),
                                              TREE_TYPE (oprnd))))
                {
                  if (vect_print_dump_info (REPORT_SLP))
                    fprintf (vect_dump, "Build SLP failed: different types ");

                  return false;
                }
            }
        }

      /* Check the types of the definitions.  */
      switch (dt[i])
        {
        case vect_constant_def:
        case vect_external_def:
          break;

        case vect_internal_def:
        case vect_reduction_def:
          if (i == 0)
            VEC_safe_push (gimple, heap, *def_stmts0, def_stmt);
          else
            VEC_safe_push (gimple, heap, *def_stmts1, def_stmt);
          break;

        default:
          /* FORNOW: Not supported.  */
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump, "Build SLP failed: illegal type of def ");
              print_generic_expr (vect_dump, def, TDF_SLIM);
            }

          return false;
        }
    }

  return true;
}

/* Recursively build an SLP tree starting from NODE.
   Fail (and return FALSE) if def-stmts are not isomorphic, require data
   permutation or are of unsupported types of operation.  Otherwise, return
   TRUE.  */

static bool
vect_build_slp_tree (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                     slp_tree *node, unsigned int group_size,
                     int *inside_cost, int *outside_cost,
                     int ncopies_for_cost, unsigned int *max_nunits,
                     VEC (int, heap) **load_permutation,
                     VEC (slp_tree, heap) **loads,
                     unsigned int vectorization_factor)
{
  VEC (gimple, heap) *def_stmts0 = VEC_alloc (gimple, heap, group_size);
  VEC (gimple, heap) *def_stmts1 = VEC_alloc (gimple, heap, group_size);
  unsigned int i;
  VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (*node);
  gimple stmt = VEC_index (gimple, stmts, 0);
  enum vect_def_type first_stmt_dt0 = vect_uninitialized_def;
  enum vect_def_type first_stmt_dt1 = vect_uninitialized_def;
  enum tree_code first_stmt_code = ERROR_MARK, rhs_code;
  tree first_stmt_def1_type = NULL_TREE, first_stmt_def0_type = NULL_TREE;
  tree lhs;
  bool stop_recursion = false, need_same_oprnds = false;
  tree vectype, scalar_type, first_op1 = NULL_TREE;
  unsigned int ncopies;
  optab optab;
  int icode;
  enum machine_mode optab_op2_mode;
  enum machine_mode vec_mode;
  tree first_stmt_const_oprnd = NULL_TREE;
  struct data_reference *first_dr;
  bool pattern0 = false, pattern1 = false;
  HOST_WIDE_INT dummy;
  bool permutation = false;
  unsigned int load_place;
  gimple first_load, prev_first_load = NULL;

  /* For every stmt in NODE find its def stmt/s.  */
  for (i = 0; VEC_iterate (gimple, stmts, i, stmt); i++)
    {
      if (vect_print_dump_info (REPORT_SLP))
        {
          fprintf (vect_dump, "Build SLP for ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Fail to vectorize statements marked as unvectorizable.  */
      if (!STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (stmt)))
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump,
                       "Build SLP failed: unvectorizable statement ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          return false;
        }

      lhs = gimple_get_lhs (stmt);
      if (lhs == NULL_TREE)
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump,
                       "Build SLP failed: not GIMPLE_ASSIGN nor GIMPLE_CALL");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          return false;
        }

      scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (vect_print_dump_info (REPORT_SLP))
            {
              fprintf (vect_dump, "Build SLP failed: unsupported data-type ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }

          return false;
        }

      ncopies = vectorization_factor / TYPE_VECTOR_SUBPARTS (vectype);
      if (ncopies != 1)
        {
          if (vect_print_dump_info (REPORT_SLP))
            fprintf (vect_dump, "SLP with multiple types ");

          /* FORNOW: multiple types are unsupported in BB SLP.  */
          if (bb_vinfo)
            return false;
        }

      /* In case of multiple types we need to detect the smallest type.  */
      if (*max_nunits < TYPE_VECTOR_SUBPARTS (vectype))
        *max_nunits = TYPE_VECTOR_SUBPARTS (vectype);
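
      /* E.g. (illustrative): a tree mixing int stmts (V4SI, 4 units) and
         short stmts (V8HI, 8 units) ends up with MAX_NUNITS == 8, from
         which the caller recomputes the instance unrolling factor.  */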

      if (is_gimple_call (stmt))
        rhs_code = CALL_EXPR;
      else
        rhs_code = gimple_assign_rhs_code (stmt);

      /* Check the operation.  */
      if (i == 0)
        {
          first_stmt_code = rhs_code;

          /* Shift arguments should be equal in all the packed stmts for a
             vector shift with scalar shift operand.  */
          if (rhs_code == LSHIFT_EXPR || rhs_code == RSHIFT_EXPR
              || rhs_code == LROTATE_EXPR
              || rhs_code == RROTATE_EXPR)
            {
              vec_mode = TYPE_MODE (vectype);

              /* First see if we have a vector/vector shift.  */
              optab = optab_for_tree_code (rhs_code, vectype,
                                           optab_vector);

              if (!optab
                  || (optab->handlers[(int) vec_mode].insn_code
                      == CODE_FOR_nothing))
                {
                  /* No vector/vector shift, try for a vector/scalar shift.  */
                  optab = optab_for_tree_code (rhs_code, vectype,
                                               optab_scalar);

                  if (!optab)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        fprintf (vect_dump, "Build SLP failed: no optab.");
                      return false;
                    }

                  icode = (int) optab->handlers[(int) vec_mode].insn_code;
                  if (icode == CODE_FOR_nothing)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        fprintf (vect_dump, "Build SLP failed: "
                                 "op not supported by target.");
                      return false;
                    }

                  optab_op2_mode = insn_data[icode].operand[2].mode;
                  if (!VECTOR_MODE_P (optab_op2_mode))
                    {
                      need_same_oprnds = true;
                      first_op1 = gimple_assign_rhs2 (stmt);
                    }
                }
            }
        }
      else
        {
          if (first_stmt_code != rhs_code
              && (first_stmt_code != IMAGPART_EXPR
                  || rhs_code != REALPART_EXPR)
              && (first_stmt_code != REALPART_EXPR
                  || rhs_code != IMAGPART_EXPR))
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump,
                           "Build SLP failed: different operation in stmt ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              return false;
            }

          if (need_same_oprnds
              && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump,
                           "Build SLP failed: different shift arguments in ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              return false;
            }
        }

      /* Strided store or load.  */
      if (STMT_VINFO_STRIDED_ACCESS (vinfo_for_stmt (stmt)))
        {
          if (REFERENCE_CLASS_P (lhs))
            {
              /* Store.  */
              if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node,
                                                stmt, &def_stmts0, &def_stmts1,
                                                &first_stmt_dt0,
                                                &first_stmt_dt1,
                                                &first_stmt_def0_type,
                                                &first_stmt_def1_type,
                                                &first_stmt_const_oprnd,
                                                ncopies_for_cost,
                                                &pattern0, &pattern1))
                return false;
            }
          else
            {
              /* Load.  */
              /* FORNOW: Check that there is no gap between the loads.  */
              if ((DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) == stmt
                   && DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
                  || (DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt)) != stmt
                      && DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 1))
                {
                  if (vect_print_dump_info (REPORT_SLP))
                    {
                      fprintf (vect_dump, "Build SLP failed: strided "
                               "loads have gaps ");
                      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                    }

                  return false;
                }

              /* Check that the size of interleaved loads group is not
                 greater than the SLP group size.  */
              if (DR_GROUP_SIZE (vinfo_for_stmt (stmt)) > ncopies * group_size)
                {
                  if (vect_print_dump_info (REPORT_SLP))
                    {
                      fprintf (vect_dump, "Build SLP failed: the number of "
                               "interleaved loads is greater than"
                               " the SLP group size ");
                      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                    }

                  return false;
                }

              first_load = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt));
              if (prev_first_load)
                {
                  /* Check that there are no loads from different interleaving
                     chains in the same node.  The only exception is complex
                     numbers.  */
                  if (prev_first_load != first_load
                      && rhs_code != REALPART_EXPR
                      && rhs_code != IMAGPART_EXPR)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        {
                          fprintf (vect_dump, "Build SLP failed: different "
                                   "interleaving chains in one node ");
                          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                        }

                      return false;
                    }
                }
              else
                prev_first_load = first_load;

              if (first_load == stmt)
                {
                  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
                  if (vect_supportable_dr_alignment (first_dr)
                      == dr_unaligned_unsupported)
                    {
                      if (vect_print_dump_info (REPORT_SLP))
                        {
                          fprintf (vect_dump, "Build SLP failed: unsupported "
                                   "unaligned load ");
                          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                        }

                      return false;
                    }

                  /* Analyze costs (for the first stmt in the group).  */
                  vect_model_load_cost (vinfo_for_stmt (stmt),
                                        ncopies_for_cost, *node);
                }

              /* Store the place of this load in the interleaving chain.  In
                 case that permutation is needed we later decide if a specific
                 permutation is supported.  */
              load_place = vect_get_place_in_interleaving_chain (stmt,
                                                                 first_load);
              if (load_place != i)
                permutation = true;

              VEC_safe_push (int, heap, *load_permutation, load_place);
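
              /* E.g. (illustrative): for an interleaving chain
                 a[0],a[1],a[2],a[3] and a node whose stmts load
                 a[2],a[3],a[0],a[1], the recorded places are 2,3,0,1 and
                 PERMUTATION is set above.  */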

              /* We stop the tree when we reach a group of loads.  */
              stop_recursion = true;
              continue;
            }
        } /* Strided access.  */
      else
        {
          if (TREE_CODE_CLASS (rhs_code) == tcc_reference)
            {
              /* Not strided load.  */
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump, "Build SLP failed: not strided load ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              /* FORNOW: Non-strided loads are not supported.  */
              return false;
            }

          /* Not memory operation.  */
          if (TREE_CODE_CLASS (rhs_code) != tcc_binary
              && TREE_CODE_CLASS (rhs_code) != tcc_unary)
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump, "Build SLP failed: operation");
                  fprintf (vect_dump, " unsupported ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              return false;
            }

          /* Find the def-stmts.  */
          if (!vect_get_and_check_slp_defs (loop_vinfo, bb_vinfo, *node, stmt,
                                            &def_stmts0, &def_stmts1,
                                            &first_stmt_dt0, &first_stmt_dt1,
                                            &first_stmt_def0_type,
                                            &first_stmt_def1_type,
                                            &first_stmt_const_oprnd,
                                            ncopies_for_cost,
                                            &pattern0, &pattern1))
            return false;
        }
    }

  /* Add the costs of the node to the overall instance costs.  */
  *inside_cost += SLP_TREE_INSIDE_OF_LOOP_COST (*node);
  *outside_cost += SLP_TREE_OUTSIDE_OF_LOOP_COST (*node);

  /* Strided loads were reached - stop the recursion.  */
  if (stop_recursion)
    {
      if (permutation)
        {
          VEC_safe_push (slp_tree, heap, *loads, *node);
          *inside_cost
            += targetm.vectorize.builtin_vectorization_cost (vec_perm)
               * group_size;
        }

      return true;
    }

  /* Create SLP_TREE nodes for the definition node/s.  */
  if (first_stmt_dt0 == vect_internal_def)
    {
      slp_tree left_node = XNEW (struct _slp_tree);
      SLP_TREE_SCALAR_STMTS (left_node) = def_stmts0;
      SLP_TREE_VEC_STMTS (left_node) = NULL;
      SLP_TREE_LEFT (left_node) = NULL;
      SLP_TREE_RIGHT (left_node) = NULL;
      SLP_TREE_OUTSIDE_OF_LOOP_COST (left_node) = 0;
      SLP_TREE_INSIDE_OF_LOOP_COST (left_node) = 0;
      if (!vect_build_slp_tree (loop_vinfo, bb_vinfo, &left_node, group_size,
                                inside_cost, outside_cost, ncopies_for_cost,
                                max_nunits, load_permutation, loads,
                                vectorization_factor))
        return false;

      SLP_TREE_LEFT (*node) = left_node;
    }

  if (first_stmt_dt1 == vect_internal_def)
    {
      slp_tree right_node = XNEW (struct _slp_tree);
      SLP_TREE_SCALAR_STMTS (right_node) = def_stmts1;
      SLP_TREE_VEC_STMTS (right_node) = NULL;
      SLP_TREE_LEFT (right_node) = NULL;
      SLP_TREE_RIGHT (right_node) = NULL;
      SLP_TREE_OUTSIDE_OF_LOOP_COST (right_node) = 0;
      SLP_TREE_INSIDE_OF_LOOP_COST (right_node) = 0;
      if (!vect_build_slp_tree (loop_vinfo, bb_vinfo, &right_node, group_size,
                                inside_cost, outside_cost, ncopies_for_cost,
                                max_nunits, load_permutation, loads,
                                vectorization_factor))
        return false;

      SLP_TREE_RIGHT (*node) = right_node;
    }

  return true;
}
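
/* Print the SLP tree rooted at NODE.  */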
static void
vect_print_slp_tree (slp_tree node)
{
  int i;
  gimple stmt;

  if (!node)
    return;

  fprintf (vect_dump, "node ");
  for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
    {
      fprintf (vect_dump, "\n\tstmt %d ", i);
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }
  fprintf (vect_dump, "\n");

  vect_print_slp_tree (SLP_TREE_LEFT (node));
  vect_print_slp_tree (SLP_TREE_RIGHT (node));
}

/* Mark the tree rooted at NODE with MARK (PURE_SLP or HYBRID).
   If MARK is HYBRID, it refers to a specific stmt in NODE (the stmt at index
   J).  Otherwise, MARK is PURE_SLP and J is -1, which indicates that all the
   stmts in NODE are to be marked.  */

static void
vect_mark_slp_stmts (slp_tree node, enum slp_vect_type mark, int j)
{
  int i;
  gimple stmt;

  if (!node)
    return;

  for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
    if (j < 0 || i == j)
      STMT_SLP_TYPE (vinfo_for_stmt (stmt)) = mark;

  vect_mark_slp_stmts (SLP_TREE_LEFT (node), mark, j);
  vect_mark_slp_stmts (SLP_TREE_RIGHT (node), mark, j);
}

/* Mark the statements of the tree rooted at NODE as relevant (vect_used).  */

static void
vect_mark_slp_stmts_relevant (slp_tree node)
{
  int i;
  gimple stmt;
  stmt_vec_info stmt_info;

  if (!node)
    return;

  for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
    {
      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (!STMT_VINFO_RELEVANT (stmt_info)
                  || STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope);
      STMT_VINFO_RELEVANT (stmt_info) = vect_used_in_scope;
    }

  vect_mark_slp_stmts_relevant (SLP_TREE_LEFT (node));
  vect_mark_slp_stmts_relevant (SLP_TREE_RIGHT (node));
}

/* Check if the permutation required by the SLP INSTANCE is supported.
   Reorganize the SLP nodes stored in SLP_INSTANCE_LOADS if needed.  */

static bool
vect_supported_slp_permutation_p (slp_instance instance)
{
  slp_tree node = VEC_index (slp_tree, SLP_INSTANCE_LOADS (instance), 0);
  gimple stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
  gimple first_load = DR_GROUP_FIRST_DR (vinfo_for_stmt (stmt));
  VEC (slp_tree, heap) *sorted_loads = NULL;
  int index;
  slp_tree *tmp_loads = NULL;
  int group_size = SLP_INSTANCE_GROUP_SIZE (instance), i, j;
  slp_tree load;

  /* FORNOW: The only supported loads permutation is loads from the same
     location in all the loads in the node, when the data-refs in
     nodes of LOADS constitute an interleaving chain.
     Sort the nodes according to the order of accesses in the chain.  */
  tmp_loads = (slp_tree *) xmalloc (sizeof (slp_tree) * group_size);
  for (i = 0, j = 0;
       VEC_iterate (int, SLP_INSTANCE_LOAD_PERMUTATION (instance), i, index)
       && VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), j, load);
       i += group_size, j++)
    {
      gimple scalar_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (load), 0);
      /* Check that the loads are all in the same interleaving chain.  */
      if (DR_GROUP_FIRST_DR (vinfo_for_stmt (scalar_stmt)) != first_load)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "Build SLP failed: unsupported data "
                       "permutation ");
              print_gimple_stmt (vect_dump, scalar_stmt, 0, TDF_SLIM);
            }

          free (tmp_loads);
          return false;
        }

      tmp_loads[index] = load;
    }

  sorted_loads = VEC_alloc (slp_tree, heap, group_size);
  for (i = 0; i < group_size; i++)
    VEC_safe_push (slp_tree, heap, sorted_loads, tmp_loads[i]);

  VEC_free (slp_tree, heap, SLP_INSTANCE_LOADS (instance));
  SLP_INSTANCE_LOADS (instance) = sorted_loads;
  free (tmp_loads);

  if (!vect_transform_slp_perm_load (stmt, NULL, NULL,
                                     SLP_INSTANCE_UNROLLING_FACTOR (instance),
                                     instance, true))
    return false;

  return true;
}

/* Rearrange the statements of NODE according to PERMUTATION.  */

static void
vect_slp_rearrange_stmts (slp_tree node, unsigned int group_size,
                          VEC (int, heap) *permutation)
{
  gimple stmt;
  VEC (gimple, heap) *tmp_stmts;
  unsigned int index, i;

  if (!node)
    return;

  vect_slp_rearrange_stmts (SLP_TREE_LEFT (node), group_size, permutation);
  vect_slp_rearrange_stmts (SLP_TREE_RIGHT (node), group_size, permutation);

  gcc_assert (group_size == VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node)));
  tmp_stmts = VEC_alloc (gimple, heap, group_size);

  for (i = 0; i < group_size; i++)
    VEC_safe_push (gimple, heap, tmp_stmts, NULL);

  for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
    {
      index = VEC_index (int, permutation, i);
      VEC_replace (gimple, tmp_stmts, index, stmt);
    }

  VEC_free (gimple, heap, SLP_TREE_SCALAR_STMTS (node));
  SLP_TREE_SCALAR_STMTS (node) = tmp_stmts;
}

/* Check if the required load permutation is supported.
   LOAD_PERMUTATION contains a list of indices of the loads.
   In SLP this permutation is relative to the order of strided stores that are
   the base of the SLP instance.  */

static bool
vect_supported_load_permutation_p (slp_instance slp_instn, int group_size,
                                   VEC (int, heap) *load_permutation)
{
  int i = 0, j, prev = -1, next, k, number_of_groups;
  bool supported, bad_permutation = false;
  sbitmap load_index;
  slp_tree node;
  gimple stmt;

  /* FORNOW: permutations are only supported in SLP.  */
  if (!slp_instn)
    return false;

  if (vect_print_dump_info (REPORT_SLP))
    {
      fprintf (vect_dump, "Load permutation ");
      for (i = 0; VEC_iterate (int, load_permutation, i, next); i++)
        fprintf (vect_dump, "%d ", next);
    }

  /* In case of reduction every load permutation is allowed, since the order
     of the reduction statements is not important (as opposed to the case of
     strided stores).  The only condition we need to check is that all the
     load nodes are of the same size and have the same permutation (and then
     rearrange all the nodes of the SLP instance according to this
     permutation).  */

  /* Check that all the load nodes are of the same size.  */
  for (i = 0;
       VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (slp_instn), i, node);
       i++)
    if (VEC_length (gimple, SLP_TREE_SCALAR_STMTS (node))
        != (unsigned) group_size)
      return false;

  node = SLP_INSTANCE_TREE (slp_instn);
  stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
  /* LOAD_PERMUTATION is a list of indices of all the loads of the SLP
     instance, not all the loads belong to the same node or interleaving
     group.  Hence, we need to divide them into groups according to
     GROUP_SIZE.  */
  number_of_groups = VEC_length (int, load_permutation) / group_size;
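
  /* E.g. (illustrative): with GROUP_SIZE == 4 and three load nodes,
     LOAD_PERMUTATION holds 12 indices, i.e., NUMBER_OF_GROUPS == 3 sequences
     of four indices each; in the reduction case below each sequence is
     compared to the first one.  */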

  /* Reduction (there are no data-refs in the root).  */
  if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)))
    {
      int first_group_load_index;

      /* Compare all the permutation sequences to the first one.  */
      for (i = 1; i < number_of_groups; i++)
        {
          k = 0;
          for (j = i * group_size; j < i * group_size + group_size; j++)
            {
              next = VEC_index (int, load_permutation, j);
              first_group_load_index = VEC_index (int, load_permutation, k);

              if (next != first_group_load_index)
                {
                  bad_permutation = true;
                  break;
                }

              k++;
            }

          if (bad_permutation)
            break;
        }

      if (!bad_permutation)
        {
          /* This permutation is valid for reduction.  Since the order of the
             statements in the nodes is not important unless they are memory
             accesses, we can rearrange the statements in all the nodes
             according to the order of the loads.  */
          vect_slp_rearrange_stmts (SLP_INSTANCE_TREE (slp_instn), group_size,
                                    load_permutation);
          VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (slp_instn));
          return true;
        }
    }

  /* FORNOW: the only supported permutation is 0..01..1.. of length equal to
     GROUP_SIZE and where each sequence of same drs is of GROUP_SIZE length as
     well (unless it's reduction).  */
  if (VEC_length (int, load_permutation)
      != (unsigned int) (group_size * group_size))
    return false;
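
  /* Illustration (made-up indices): for GROUP_SIZE == 2 and two load nodes,
     an accepted LOAD_PERMUTATION is {0, 0, 1, 1} - each run of GROUP_SIZE
     equal indices means a node loads from a single place in the chain.
     A permutation such as {0, 1, 0, 1} is rejected by the checks below.  */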

  supported = true;
  load_index = sbitmap_alloc (group_size);
  sbitmap_zero (load_index);
  for (j = 0; j < group_size; j++)
    {
      for (i = j * group_size, k = 0;
           VEC_iterate (int, load_permutation, i, next) && k < group_size;
           i++, k++)
        {
          if (i != j * group_size && next != prev)
            {
              supported = false;
              break;
            }

          prev = next;
        }

      if (TEST_BIT (load_index, prev))
        {
          supported = false;
          break;
        }

      SET_BIT (load_index, prev);
    }

  for (j = 0; j < group_size; j++)
    if (!TEST_BIT (load_index, j))
      return false;

  sbitmap_free (load_index);

  if (supported && i == group_size * group_size
      && vect_supported_slp_permutation_p (slp_instn))
    return true;

  return false;
}

/* Find the first load in the loop that belongs to INSTANCE.
   When loads are in several SLP nodes, there can be a case in which the first
   load does not appear in the first SLP node to be transformed, causing
   incorrect order of statements.  Since we generate all the loads together,
   they must be inserted before the first load of the SLP instance and not
   before the first load of the first node of the instance.  */

static gimple
vect_find_first_load_in_slp_instance (slp_instance instance)
{
  int i, j;
  slp_tree load_node;
  gimple first_load = NULL, load;

  for (i = 0;
       VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), i, load_node);
       i++)
    for (j = 0;
         VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (load_node), j, load);
         j++)
      first_load = get_earlier_stmt (load, first_load);

  return first_load;
}

/* Analyze an SLP instance starting from a group of strided stores.  Call
   vect_build_slp_tree to build a tree of packed stmts if possible.
   Return FALSE if it's impossible to SLP any stmt in the loop.  */

static bool
vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
                           gimple stmt)
{
  slp_instance new_instance;
  slp_tree node = XNEW (struct _slp_tree);
  unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (stmt));
  unsigned int unrolling_factor = 1, nunits;
  tree vectype, scalar_type = NULL_TREE;
  gimple next;
  unsigned int vectorization_factor = 0;
  int inside_cost = 0, outside_cost = 0, ncopies_for_cost, i;
  unsigned int max_nunits = 0;
  VEC (int, heap) *load_permutation;
  VEC (slp_tree, heap) *loads;
  struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));

  if (dr)
    {
      scalar_type = TREE_TYPE (DR_REF (dr));
      vectype = get_vectype_for_scalar_type (scalar_type);
      group_size = DR_GROUP_SIZE (vinfo_for_stmt (stmt));
    }
  else
    {
      gcc_assert (loop_vinfo);
      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
      group_size = VEC_length (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo));
    }

  if (!vectype)
    {
      if (vect_print_dump_info (REPORT_SLP))
        {
          fprintf (vect_dump, "Build SLP failed: unsupported data-type ");
          print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
        }

      return false;
    }

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (loop_vinfo)
    vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    /* No multitypes in BB SLP.  */
    vectorization_factor = nunits;

  /* Calculate the unrolling factor.  */
  unrolling_factor = least_common_multiple (nunits, group_size) / group_size;
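
  /* E.g. (illustrative): GROUP_SIZE == 6 stores of a type with NUNITS == 4
     give least_common_multiple (4, 6) == 12, hence UNROLLING_FACTOR
     == 12 / 6 == 2: the group must be repeated twice to fill whole
     vectors.  */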

  if (unrolling_factor != 1 && !loop_vinfo)
    {
      if (vect_print_dump_info (REPORT_SLP))
        fprintf (vect_dump, "Build SLP failed: unrolling required in basic"
                 " block SLP");

      return false;
    }

  /* Create a node (a root of the SLP tree) for the packed strided stores.  */
  SLP_TREE_SCALAR_STMTS (node) = VEC_alloc (gimple, heap, group_size);
  next = stmt;
  if (dr)
    {
      /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
      while (next)
        {
          VEC_safe_push (gimple, heap, SLP_TREE_SCALAR_STMTS (node), next);
          next = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
        }
    }
  else
    {
      /* Collect reduction statements.  */
      for (i = 0; VEC_iterate (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo), i,
                               next);
           i++)
        {
          VEC_safe_push (gimple, heap, SLP_TREE_SCALAR_STMTS (node), next);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "pushing reduction into node: ");
              print_gimple_stmt (vect_dump, next, 0, TDF_SLIM);
            }
        }
    }

  SLP_TREE_VEC_STMTS (node) = NULL;
  SLP_TREE_NUMBER_OF_VEC_STMTS (node) = 0;
  SLP_TREE_LEFT (node) = NULL;
  SLP_TREE_RIGHT (node) = NULL;
  SLP_TREE_OUTSIDE_OF_LOOP_COST (node) = 0;
  SLP_TREE_INSIDE_OF_LOOP_COST (node) = 0;

  /* Calculate the number of vector stmts to create based on the unrolling
     factor (number of vectors is 1 if NUNITS >= GROUP_SIZE, and is
     GROUP_SIZE / NUNITS otherwise).  */
  ncopies_for_cost = unrolling_factor * group_size / nunits;
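
  /* E.g. (illustrative): NUNITS == 4, GROUP_SIZE == 8 and UNROLLING_FACTOR
     == 1 give NCOPIES_FOR_COST == 1 * 8 / 4 == 2 vector stmts per scalar
     stmt of the group.  */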

  load_permutation = VEC_alloc (int, heap, group_size * group_size);
  loads = VEC_alloc (slp_tree, heap, group_size);

  /* Build the tree for the SLP instance.  */
  if (vect_build_slp_tree (loop_vinfo, bb_vinfo, &node, group_size,
                           &inside_cost, &outside_cost, ncopies_for_cost,
                           &max_nunits, &load_permutation, &loads,
                           vectorization_factor))
    {
      /* Create a new SLP instance.  */
      new_instance = XNEW (struct _slp_instance);
      SLP_INSTANCE_TREE (new_instance) = node;
      SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
      /* Calculate the unrolling factor based on the smallest type in the
         loop.  */
      if (max_nunits > nunits)
        unrolling_factor = least_common_multiple (max_nunits, group_size)
                           / group_size;

      SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
      SLP_INSTANCE_OUTSIDE_OF_LOOP_COST (new_instance) = outside_cost;
      SLP_INSTANCE_INSIDE_OF_LOOP_COST (new_instance) = inside_cost;
      SLP_INSTANCE_LOADS (new_instance) = loads;
      SLP_INSTANCE_FIRST_LOAD_STMT (new_instance) = NULL;
      SLP_INSTANCE_LOAD_PERMUTATION (new_instance) = load_permutation;
      if (VEC_length (slp_tree, loads))
        {
          if (!vect_supported_load_permutation_p (new_instance, group_size,
                                                  load_permutation))
            {
              if (vect_print_dump_info (REPORT_SLP))
                {
                  fprintf (vect_dump, "Build SLP failed: unsupported load "
                           "permutation ");
                  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
                }

              vect_free_slp_instance (new_instance);
              return false;
            }

          SLP_INSTANCE_FIRST_LOAD_STMT (new_instance)
            = vect_find_first_load_in_slp_instance (new_instance);
        }
      else
        VEC_free (int, heap, SLP_INSTANCE_LOAD_PERMUTATION (new_instance));

      if (loop_vinfo)
        VEC_safe_push (slp_instance, heap,
                       LOOP_VINFO_SLP_INSTANCES (loop_vinfo),
                       new_instance);
      else
        VEC_safe_push (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo),
                       new_instance);

      if (vect_print_dump_info (REPORT_SLP))
        vect_print_slp_tree (node);

      return true;
    }

  /* Failed to SLP.  */
  /* Free the allocated memory.  */
  vect_free_slp_tree (node);
  VEC_free (int, heap, load_permutation);
  VEC_free (slp_tree, heap, loads);

  return false;
}

/* Check if there are stmts in the loop that can be vectorized using SLP.
   Build SLP trees of packed scalar stmts if SLP is possible.  */

bool
vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  unsigned int i;
  VEC (gimple, heap) *strided_stores, *reductions = NULL;
  gimple store;
  bool ok = false;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_analyze_slp ===");

  if (loop_vinfo)
    {
      strided_stores = LOOP_VINFO_STRIDED_STORES (loop_vinfo);
      reductions = LOOP_VINFO_REDUCTIONS (loop_vinfo);
    }
  else
    strided_stores = BB_VINFO_STRIDED_STORES (bb_vinfo);

  /* Find SLP sequences starting from groups of strided stores.  */
  for (i = 0; VEC_iterate (gimple, strided_stores, i, store); i++)
    if (vect_analyze_slp_instance (loop_vinfo, bb_vinfo, store))
      ok = true;

  if (bb_vinfo && !ok)
    {
      if (vect_print_dump_info (REPORT_SLP))
        fprintf (vect_dump, "Failed to SLP the basic block.");

      return false;
    }

  /* Find SLP sequences starting from groups of reductions.  */
  if (loop_vinfo && VEC_length (gimple, LOOP_VINFO_REDUCTIONS (loop_vinfo)) > 1
      && vect_analyze_slp_instance (loop_vinfo, bb_vinfo,
                                    VEC_index (gimple, reductions, 0)))
    ok = true;

  return true;
}

/* For each possible SLP instance decide whether to SLP it and calculate overall
   unrolling factor needed to SLP the loop.  */

void
vect_make_slp_decision (loop_vec_info loop_vinfo)
{
  unsigned int i, unrolling_factor = 1;
  VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;
  int decided_to_slp = 0;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_make_slp_decision ===");

  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      /* FORNOW: SLP if you can.  */
      if (unrolling_factor < SLP_INSTANCE_UNROLLING_FACTOR (instance))
        unrolling_factor = SLP_INSTANCE_UNROLLING_FACTOR (instance);

      /* Mark all the stmts that belong to INSTANCE as PURE_SLP stmts.  Later we
         call vect_detect_hybrid_slp () to find stmts that need hybrid SLP and
         loop-based vectorization.  Such stmts will be marked as HYBRID.  */
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      decided_to_slp++;
    }

  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo) = unrolling_factor;

  if (decided_to_slp && vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "Decided to SLP %d instances. Unrolling factor %d",
             decided_to_slp, unrolling_factor);
}

/* Find stmts that must be both vectorized and SLPed (since they feed stmts that
   can't be SLPed) in the tree rooted at NODE.  Mark such stmts as HYBRID.  */

static void
vect_detect_hybrid_slp_stmts (slp_tree node)
{
  int i;
  gimple stmt;
  imm_use_iterator imm_iter;
  gimple use_stmt;
  stmt_vec_info stmt_vinfo;

  if (!node)
    return;

  for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
    if (PURE_SLP_STMT (vinfo_for_stmt (stmt))
        && TREE_CODE (gimple_op (stmt, 0)) == SSA_NAME)
      FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, gimple_op (stmt, 0))
        if ((stmt_vinfo = vinfo_for_stmt (use_stmt))
            && !STMT_SLP_TYPE (stmt_vinfo)
            && (STMT_VINFO_RELEVANT (stmt_vinfo)
                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_vinfo)))
            && !(gimple_code (use_stmt) == GIMPLE_PHI
                 && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (use_stmt))
                    == vect_reduction_def))
          vect_mark_slp_stmts (node, hybrid, i);

  vect_detect_hybrid_slp_stmts (SLP_TREE_LEFT (node));
  vect_detect_hybrid_slp_stmts (SLP_TREE_RIGHT (node));
}

/* Find stmts that must be both vectorized and SLPed.  */

void
vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
{
  unsigned int i;
  VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_detect_hybrid_slp ===");

  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
}

/* Create and initialize a new bb_vec_info struct for BB, as well as
   stmt_vec_info structs for all the stmts in it.  */

static bb_vec_info
new_bb_vec_info (basic_block bb)
{
  bb_vec_info res = NULL;
  gimple_stmt_iterator gsi;

  res = (bb_vec_info) xcalloc (1, sizeof (struct _bb_vec_info));
  BB_VINFO_BB (res) = bb;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      gimple_set_uid (stmt, 0);
      set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, NULL, res));
    }

  BB_VINFO_STRIDED_STORES (res) = VEC_alloc (gimple, heap, 10);
  BB_VINFO_SLP_INSTANCES (res) = VEC_alloc (slp_instance, heap, 2);

  bb->aux = res;
  return res;
}

/* Free BB_VINFO struct, as well as all the stmt_vec_info structs of all the
   stmts in the basic block.  */

static void
destroy_bb_vec_info (bb_vec_info bb_vinfo)
{
  basic_block bb;
  gimple_stmt_iterator si;

  if (!bb_vinfo)
    return;

  bb = BB_VINFO_BB (bb_vinfo);

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

      if (stmt_info)
        /* Free stmt_vec_info.  */
        free_stmt_vec_info (stmt);
    }

  VEC_free (gimple, heap, BB_VINFO_STRIDED_STORES (bb_vinfo));
  VEC_free (slp_instance, heap, BB_VINFO_SLP_INSTANCES (bb_vinfo));
  free (bb_vinfo);
  bb->aux = NULL;
}

/* Analyze statements contained in SLP tree node after recursively analyzing
   the subtree.  Return TRUE if the operations are supported.  */

static bool
vect_slp_analyze_node_operations (bb_vec_info bb_vinfo, slp_tree node)
{
  bool dummy;
  int i;
  gimple stmt;

  if (!node)
    return true;

  if (!vect_slp_analyze_node_operations (bb_vinfo, SLP_TREE_LEFT (node))
      || !vect_slp_analyze_node_operations (bb_vinfo, SLP_TREE_RIGHT (node)))
    return false;

  for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);
      gcc_assert (PURE_SLP_STMT (stmt_info));

      if (!vect_analyze_stmt (stmt, &dummy, node))
        return false;
    }

  return true;
}

/* Analyze statements in SLP instances of the basic block.  Return TRUE if the
   operations are supported.  */

static bool
vect_slp_analyze_operations (bb_vec_info bb_vinfo)
{
  VEC (slp_instance, heap) *slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
  slp_instance instance;
  int i;

  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); )
    {
      if (!vect_slp_analyze_node_operations (bb_vinfo,
                                             SLP_INSTANCE_TREE (instance)))
        {
          vect_free_slp_instance (instance);
          VEC_ordered_remove (slp_instance, slp_instances, i);
        }
      else
        i++;
    }

  if (!VEC_length (slp_instance, slp_instances))
    return false;

  return true;
}

/* Check if the basic block can be vectorized.  */

bb_vec_info
vect_slp_analyze_bb (basic_block bb)
{
  bb_vec_info bb_vinfo;
  VEC (ddr_p, heap) *ddrs;
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;
  int i, insns = 0;
  gimple_stmt_iterator gsi;
  int min_vf = 2;
  int max_vf = MAX_VECTORIZATION_FACTOR;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "===vect_slp_analyze_bb===\n");

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (!is_gimple_debug (stmt)
          && !gimple_nop_p (stmt)
          && gimple_code (stmt) != GIMPLE_LABEL)
        insns++;
    }

  if (insns > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: too many instructions in basic "
                 "block.\n");

      return NULL;
    }

  bb_vinfo = new_bb_vec_info (bb);
  if (!bb_vinfo)
    return NULL;

  if (!vect_analyze_data_refs (NULL, bb_vinfo, &min_vf))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unhandled data-ref in basic "
                 "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  ddrs = BB_VINFO_DDRS (bb_vinfo);
  if (!VEC_length (ddr_p, ddrs))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: not enough data-refs in basic "
                 "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_dependences (NULL, bb_vinfo, &max_vf)
      || min_vf > max_vf)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unhandled data dependence "
                 "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_refs_alignment (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: bad data alignment in basic "
                 "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_analyze_data_ref_accesses (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unhandled data access in basic "
                 "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (!vect_verify_datarefs_alignment (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported alignment in basic "
                 "block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  /* Check the SLP opportunities in the basic block, analyze and build SLP
     trees.  */
  if (!vect_analyze_slp (NULL, bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: failed to find SLP opportunities "
                 "in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

  /* Mark all the statements that we want to vectorize as pure SLP and
     relevant.  */
  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      vect_mark_slp_stmts (SLP_INSTANCE_TREE (instance), pure_slp, -1);
      vect_mark_slp_stmts_relevant (SLP_INSTANCE_TREE (instance));
    }

  if (!vect_slp_analyze_operations (bb_vinfo))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: bad operation in basic block.\n");

      destroy_bb_vec_info (bb_vinfo);
      return NULL;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "Basic block will be vectorized using SLP\n");

  return bb_vinfo;
}

/* SLP costs are calculated according to SLP instance unrolling factor (i.e.,
   the number of created vector stmts depends on the unrolling factor).
   However, the actual number of vector stmts for every SLP node depends on
   VF, which is set later in vect_analyze_operations ().  Hence, SLP costs
   should be updated.  In this function we assume that the inside costs
   calculated in vect_model_xxx_cost are linear in ncopies.  */
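/* E.g. (illustrative): an instance analyzed with unrolling factor 2 in a
   loop whose final VF is 8 creates 8 / 2 == 4 times the vector stmts
   assumed during the analysis, so its inside cost is scaled by that
   ratio.  */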

void
vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
{
  unsigned int i, vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  VEC (slp_instance, heap) *slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
  slp_instance instance;

  if (vect_print_dump_info (REPORT_SLP))
    fprintf (vect_dump, "=== vect_update_slp_costs_according_to_vf ===");

  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    /* We assume that costs are linear in ncopies.  */
    SLP_INSTANCE_INSIDE_OF_LOOP_COST (instance) *= vf
      / SLP_INSTANCE_UNROLLING_FACTOR (instance);
}

/* For constant and loop invariant defs of SLP_NODE this function returns
   (vector) defs (VEC_OPRNDS) that will be used in the vectorized stmts.
   OP_NUM determines if we gather defs for operand 0 or operand 1 of the scalar
   stmts.  NUMBER_OF_VECTORS is the number of vector defs to create.
   REDUC_INDEX is the index of the reduction operand in the statements, unless
   it is -1.  */

static void
vect_get_constant_vectors (slp_tree slp_node, VEC(tree,heap) **vec_oprnds,
                           unsigned int op_num, unsigned int number_of_vectors,
                           int reduc_index)
{
  VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);
  gimple stmt = VEC_index (gimple, stmts, 0);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  int nunits;
  tree vec_cst;
  tree t = NULL_TREE;
  int j, number_of_places_left_in_vector;
  tree vector_type;
  tree op, vop;
  int group_size = VEC_length (gimple, stmts);
  unsigned int vec_num, i;
  int number_of_copies = 1;
  VEC (tree, heap) *voprnds = VEC_alloc (tree, heap, number_of_vectors);
  bool constant_p, is_store;
  tree neutral_op = NULL;

  if (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def)
    {
      enum tree_code code = gimple_assign_rhs_code (stmt);
      if (reduc_index == -1)
        {
          VEC_free (tree, heap, *vec_oprnds);
          return;
        }

      op_num = reduc_index - 1;
      op = gimple_op (stmt, op_num + 1);
      /* For additional copies (see the explanation of NUMBER_OF_COPIES below)
         we need either neutral operands or the original operands.  See
         get_initial_def_for_reduction() for details.  */
      switch (code)
        {
          case WIDEN_SUM_EXPR:
          case DOT_PROD_EXPR:
          case PLUS_EXPR:
          case MINUS_EXPR:
          case BIT_IOR_EXPR:
          case BIT_XOR_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst0);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 0);
            break;

          case MULT_EXPR:
            if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (op)))
              neutral_op = build_real (TREE_TYPE (op), dconst1);
            else
              neutral_op = build_int_cst (TREE_TYPE (op), 1);
            break;

          case BIT_AND_EXPR:
            neutral_op = build_int_cst (TREE_TYPE (op), -1);
            break;

          default:
            neutral_op = NULL;
        }
    }

  if (STMT_VINFO_DATA_REF (stmt_vinfo))
    {
      is_store = true;
      op = gimple_assign_rhs1 (stmt);
    }
  else
    {
      is_store = false;
      op = gimple_op (stmt, op_num + 1);
    }

  if (CONSTANT_CLASS_P (op))
    constant_p = true;
  else
    constant_p = false;

  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
  gcc_assert (vector_type);

  nunits = TYPE_VECTOR_SUBPARTS (vector_type);

  /* NUMBER_OF_COPIES is the number of times we need to use the same values in
     created vectors.  It is greater than 1 if unrolling is performed.

     For example, we have two scalar operands, s1 and s2 (e.g., group of
     strided accesses of size two), while NUNITS is four (i.e., four scalars
     of this type can be packed in a vector).  The output vector will contain
     two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
     will be 2).

     If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
     containing the operands.

     For example, NUNITS is four as before, and the group size is 8
     (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
     {s5, s6, s7, s8}.  */

  number_of_copies = least_common_multiple (nunits, group_size) / group_size;

  number_of_places_left_in_vector = nunits;
  for (j = 0; j < number_of_copies; j++)
    {
      for (i = group_size - 1; VEC_iterate (gimple, stmts, i, stmt); i--)
        {
          if (is_store)
            op = gimple_assign_rhs1 (stmt);
          else
            op = gimple_op (stmt, op_num + 1);

          if (reduc_index != -1)
            {
              struct loop *loop = (gimple_bb (stmt))->loop_father;
              gimple def_stmt = SSA_NAME_DEF_STMT (op);

              gcc_assert (loop);
              /* Get the def before the loop.  */
              op = PHI_ARG_DEF_FROM_EDGE (def_stmt,
                                          loop_preheader_edge (loop));
              if (j != (number_of_copies - 1) && neutral_op)
                op = neutral_op;
            }

          /* Create 'vect_ = {op0,op1,...,opn}'.  */
          t = tree_cons (NULL_TREE, op, t);

          number_of_places_left_in_vector--;

          if (number_of_places_left_in_vector == 0)
            {
              number_of_places_left_in_vector = nunits;

              if (constant_p)
                vec_cst = build_vector (vector_type, t);
              else
                vec_cst = build_constructor_from_list (vector_type, t);
              VEC_quick_push (tree, voprnds,
                              vect_init_vector (stmt, vec_cst, vector_type,
                                                NULL));
              t = NULL_TREE;
            }
        }
    }

  /* Since the vectors are created in the reverse order, we should invert
     them.  */
  vec_num = VEC_length (tree, voprnds);
  for (j = vec_num - 1; j >= 0; j--)
    {
      vop = VEC_index (tree, voprnds, j);
      VEC_quick_push (tree, *vec_oprnds, vop);
    }

  VEC_free (tree, heap, voprnds);

  /* In case that VF is greater than the unrolling factor needed for the SLP
     group of stmts, NUMBER_OF_VECTORS to be created is greater than
     NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
     to replicate the vectors.  */
  while (number_of_vectors > VEC_length (tree, *vec_oprnds))
    {
      tree neutral_vec = NULL;

      if (neutral_op)
        {
          if (!neutral_vec)
            {
              t = NULL;
              for (i = 0; i < (unsigned) nunits; i++)
                t = tree_cons (NULL_TREE, neutral_op, t);
              neutral_vec = build_vector (vector_type, t);
            }

          VEC_quick_push (tree, *vec_oprnds, neutral_vec);
        }
      else
        {
          for (i = 0; VEC_iterate (tree, *vec_oprnds, i, vop) && i < vec_num;
               i++)
            VEC_quick_push (tree, *vec_oprnds, vop);
        }
    }
}

/* Get vectorized definitions from SLP_NODE that contains corresponding
   vectorized def-stmts.  */

static void
vect_get_slp_vect_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds)
{
  tree vec_oprnd;
  gimple vec_def_stmt;
  unsigned int i;

  gcc_assert (SLP_TREE_VEC_STMTS (slp_node));

  for (i = 0;
       VEC_iterate (gimple, SLP_TREE_VEC_STMTS (slp_node), i, vec_def_stmt);
       i++)
    {
      gcc_assert (vec_def_stmt);
      vec_oprnd = gimple_get_lhs (vec_def_stmt);
      VEC_quick_push (tree, *vec_oprnds, vec_oprnd);
    }
}

/* Get vectorized definitions for SLP_NODE.
   If the scalar definitions are loop invariants or constants, collect them and
   call vect_get_constant_vectors() to create vector stmts.
   Otherwise, the def-stmts must be already vectorized and the vectorized stmts
   must be stored in the LEFT/RIGHT node of SLP_NODE, and we call
   vect_get_slp_vect_defs() to retrieve them.
   If VEC_OPRNDS1 is NULL, don't get vector defs for the second operand (from
   the right node).  This is used when the second operand must remain scalar.  */

void
vect_get_slp_defs (slp_tree slp_node, VEC (tree,heap) **vec_oprnds0,
                   VEC (tree,heap) **vec_oprnds1, int reduc_index)
{
  gimple first_stmt;
  enum tree_code code;
  int number_of_vects;
  HOST_WIDE_INT lhs_size_unit, rhs_size_unit;

  first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
  /* The number of vector defs is determined by the number of vector statements
     in the node from which we get those statements.  */
  if (SLP_TREE_LEFT (slp_node))
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_LEFT (slp_node));
  else
    {
      number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
      /* Number of vector stmts was calculated according to LHS in
         vect_schedule_slp_instance(), fix it by replacing LHS with RHS, if
         necessary.  See vect_get_smallest_scalar_type() for details.  */
      vect_get_smallest_scalar_type (first_stmt, &lhs_size_unit,
                                     &rhs_size_unit);
      if (rhs_size_unit != lhs_size_unit)
        {
          number_of_vects *= rhs_size_unit;
          number_of_vects /= lhs_size_unit;
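
          /* E.g. (illustrative): a group of stores of chars computed from
             ints has LHS_SIZE_UNIT == 1 and RHS_SIZE_UNIT == 4, so four
             times more vector defs are needed for the rhs.  */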
        }
    }

  /* Allocate memory for vectorized defs.  */
  *vec_oprnds0 = VEC_alloc (tree, heap, number_of_vects);

  /* SLP_NODE corresponds either to a group of stores or to a group of
     unary/binary operations.  We don't call this function for loads.
     For reduction defs we call vect_get_constant_vectors(), since we are
     looking for initial loop invariant values.  */
  if (SLP_TREE_LEFT (slp_node) && reduc_index == -1)
    /* The defs are already vectorized.  */
    vect_get_slp_vect_defs (SLP_TREE_LEFT (slp_node), vec_oprnds0);
  else
    /* Build vectors from scalar defs.  */
    vect_get_constant_vectors (slp_node, vec_oprnds0, 0, number_of_vects,
                               reduc_index);

  if (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt)))
    /* Since we don't call this function with loads, this is a group of
       stores.  */
    return;

  /* For reductions, we only need initial values.  */
  if (reduc_index != -1)
    return;

  code = gimple_assign_rhs_code (first_stmt);
  if (get_gimple_rhs_class (code) != GIMPLE_BINARY_RHS || !vec_oprnds1)
    return;

  /* The number of vector defs is determined by the number of vector statements
     in the node from which we get those statements.  */
  if (SLP_TREE_RIGHT (slp_node))
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_RIGHT (slp_node));
  else
    number_of_vects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  *vec_oprnds1 = VEC_alloc (tree, heap, number_of_vects);

  if (SLP_TREE_RIGHT (slp_node))
    /* The defs are already vectorized.  */
    vect_get_slp_vect_defs (SLP_TREE_RIGHT (slp_node), vec_oprnds1);
  else
    /* Build vectors from scalar defs.  */
    vect_get_constant_vectors (slp_node, vec_oprnds1, 1, number_of_vects, -1);
}

/* Create NCOPIES permutation statements using the mask MASK_BYTES (by
   building a vector of type MASK_TYPE from it) and two input vectors placed in
   DR_CHAIN at FIRST_VEC_INDX and SECOND_VEC_INDX for the first copy and
   shifting by STRIDE elements of DR_CHAIN for every copy.
   (STRIDE is the number of vectorized stmts for NODE divided by the number of
   copies).
   VECT_STMTS_COUNTER specifies the index in the vectorized stmts of NODE, where
   the created stmts must be inserted.  */

static void
vect_create_mask_and_perm (gimple stmt, gimple next_scalar_stmt,
                           tree mask, int first_vec_indx, int second_vec_indx,
                           gimple_stmt_iterator *gsi, slp_tree node,
                           tree builtin_decl, tree vectype,
                           VEC(tree,heap) *dr_chain,
                           int ncopies, int vect_stmts_counter)
{
  tree perm_dest;
  gimple perm_stmt = NULL;
  stmt_vec_info next_stmt_info;
  int i, stride;
  tree first_vec, second_vec, data_ref;

  stride = SLP_TREE_NUMBER_OF_VEC_STMTS (node) / ncopies;
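
  /* E.g. (illustrative): with 4 vector stmts for NODE and NCOPIES == 2,
     STRIDE == 2, so the second copy of each permutation reads its input
     vectors two positions further in DR_CHAIN.  */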

  /* Initialize the vect stmts of NODE to properly insert the generated
     stmts later.  */
  for (i = VEC_length (gimple, SLP_TREE_VEC_STMTS (node));
       i < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (node); i++)
    VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (node), NULL);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
  for (i = 0; i < ncopies; i++)
    {
      first_vec = VEC_index (tree, dr_chain, first_vec_indx);
      second_vec = VEC_index (tree, dr_chain, second_vec_indx);

      /* Generate the permute statement.  */
      perm_stmt = gimple_build_call (builtin_decl,
                                     3, first_vec, second_vec, mask);
      data_ref = make_ssa_name (perm_dest, perm_stmt);
      gimple_call_set_lhs (perm_stmt, data_ref);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);

      /* Store the vector statement in NODE.  */
      VEC_replace (gimple, SLP_TREE_VEC_STMTS (node),
                   stride * i + vect_stmts_counter, perm_stmt);

      first_vec_indx += stride;
      second_vec_indx += stride;
    }

  /* Mark the scalar stmt as vectorized.  */
  next_stmt_info = vinfo_for_stmt (next_scalar_stmt);
  STMT_VINFO_VEC_STMT (next_stmt_info) = perm_stmt;
}

/* Given FIRST_MASK_ELEMENT - the mask element in element representation,
   return in CURRENT_MASK_ELEMENT its equivalent in target specific
   representation.  Check that the mask is valid and return FALSE if not.
   Return TRUE in NEED_NEXT_VECTOR if the permutation requires to move to
   the next vector, i.e., the current first vector is not needed.  */

static bool
vect_get_mask_element (gimple stmt, int first_mask_element, int m,
                       int mask_nunits, bool only_one_vec, int index,
                       int *mask, int *current_mask_element,
                       bool *need_next_vector)
{
  int i;
  static int number_of_mask_fixes = 1;
  static bool mask_fixed = false;
  static bool needs_first_vector = false;

  /* Convert to target specific representation.  */
  *current_mask_element = first_mask_element + m;
  /* Adjust the value in case it's a mask for second and third vectors.  */
  *current_mask_element -= mask_nunits * (number_of_mask_fixes - 1);

  if (*current_mask_element < mask_nunits)
    needs_first_vector = true;

  /* We have only one input vector to permute but the mask accesses values in
     the next vector as well.  */
  if (only_one_vec && *current_mask_element >= mask_nunits)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "permutation requires at least two vectors ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  /* The mask requires the next vector.  */
  if (*current_mask_element >= mask_nunits * 2)
    {
      if (needs_first_vector || mask_fixed)
        {
          /* We either need the first vector too or have already moved to the
             next vector.  In both cases, this permutation needs three
             vectors.  */
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "permutation requires at "
                       "least three vectors ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          return false;
        }

      /* We move to the next vector, dropping the first one and working with
         the second and the third - we need to adjust the values of the mask
         accordingly.  */
      *current_mask_element -= mask_nunits * number_of_mask_fixes;

      for (i = 0; i < index; i++)
        mask[i] -= mask_nunits * number_of_mask_fixes;

      (number_of_mask_fixes)++;
      mask_fixed = true;
    }

  *need_next_vector = mask_fixed;

  /* This was the last element of this mask.  Start a new one.  */
  if (index == mask_nunits - 1)
    {
      number_of_mask_fixes = 1;
      mask_fixed = false;
      needs_first_vector = false;
    }

  return true;
}
2050 /* Generate vector permute statements from a list of loads in DR_CHAIN.
2051 If ANALYZE_ONLY is TRUE, only check that it is possible to create valid
2052 permute statements for SLP_NODE_INSTANCE. */
2054 vect_transform_slp_perm_load (gimple stmt
, VEC (tree
, heap
) *dr_chain
,
2055 gimple_stmt_iterator
*gsi
, int vf
,
2056 slp_instance slp_node_instance
, bool analyze_only
)
2058 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2059 tree mask_element_type
= NULL_TREE
, mask_type
;
2060 int i
, j
, k
, m
, scale
, mask_nunits
, nunits
, vec_index
= 0, scalar_index
;
2062 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
), builtin_decl
;
2063 gimple next_scalar_stmt
;
2064 int group_size
= SLP_INSTANCE_GROUP_SIZE (slp_node_instance
);
2065 int first_mask_element
;
2066 int index
, unroll_factor
, *mask
, current_mask_element
, ncopies
;
2067 bool only_one_vec
= false, need_next_vector
= false;
2068 int first_vec_index
, second_vec_index
, orig_vec_stmts_num
, vect_stmts_counter
;
2070 if (!targetm
.vectorize
.builtin_vec_perm
)
2072 if (vect_print_dump_info (REPORT_DETAILS
))
2074 fprintf (vect_dump
, "no builtin for vect permute for ");
2075 print_gimple_stmt (vect_dump
, stmt
, 0, TDF_SLIM
);
2081 builtin_decl
= targetm
.vectorize
.builtin_vec_perm (vectype
,
2082 &mask_element_type
);
2083 if (!builtin_decl
|| !mask_element_type
)
2085 if (vect_print_dump_info (REPORT_DETAILS
))
2087 fprintf (vect_dump
, "no builtin for vect permute for ");
2088 print_gimple_stmt (vect_dump
, stmt
, 0, TDF_SLIM
);

  mask_type = get_vectype_for_scalar_type (mask_element_type);
  mask_nunits = TYPE_VECTOR_SUBPARTS (mask_type);
  mask = (int *) xmalloc (sizeof (int) * mask_nunits);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  scale = mask_nunits / nunits;
  unroll_factor = SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
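
  /* For instance (an assumed target, for illustration only): on Altivec,
     whose permute masks are byte vectors, a V4SI VECTYPE (nunits == 4) gets
     a 16-element byte mask type (mask_nunits == 16), so scale == 4 and each
     vector element is selected by four consecutive mask bytes.  */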

  /* The number of vector stmts to generate based only on SLP_NODE_INSTANCE
     unrolling factor.  */
  orig_vec_stmts_num = group_size
                       * SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance)
                       / nunits;
  if (orig_vec_stmts_num == 1)
    only_one_vec = true;

  /* The number of copies is determined by the final vectorization factor
     relative to the SLP_NODE_INSTANCE unrolling factor.  */
  ncopies = vf / SLP_INSTANCE_UNROLLING_FACTOR (slp_node_instance);
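
  /* E.g. (hypothetical numbers): with GROUP_SIZE == 3, nunits == 4 and an
     SLP unrolling factor of 4 - the example below - orig_vec_stmts_num is
     3 * 4 / 4 == 3, so the loads span more than one input vector; and if
     the loop vectorization factor VF were 8, the permutations would be
     replicated ncopies == 8 / 4 == 2 times.  */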

  /* Generate permutation masks for every NODE.  The number of masks for each
     NODE is equal to GROUP_SIZE.
     E.g., we have a group of three nodes with three loads from the same
     location in each node, and the vector size is 4.  I.e., we have
     a0b0c0a1b1c1... sequence and we need to create the following vectors:
     for a's: a0a0a0a1 a1a1a2a2 a2a3a3a3
     for b's: b0b0b0b1 b1b1b2b2 b2b3b3b3
     for c's: c0c0c0c1 c1c1c2c2 c2c3c3c3

     The masks for a's should be: {0,0,0,3} {3,3,6,6} {6,9,9,9} (in target
     specific type, e.g., in bytes for Altivec).
     The last mask is illegal since we assume two operands for a permute
     operation, and the mask element values can't be outside that range.
     Hence, the last mask must be converted into {2,5,5,5}.
     For the first two permutations we need the first and the second input
     vectors: {a0,b0,c0,a1} and {b1,c1,a2,b2}, and for the last permutation
     we need the second and the third vectors: {b1,c1,a2,b2} and
     {c2,a3,b3,c3}.  */
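
  /* A worked instance of the index arithmetic in the loop below (using the
     example above, assuming scale == 1): for the a's, i == 0, GROUP_SIZE == 3
     and the unrolling factor is 4, so FIRST_MASK_ELEMENT takes the values
     0, 3, 6 and 9, each emitted GROUP_SIZE times.  Split into chunks of
     mask_nunits == 4 this gives exactly the masks {0,0,0,3} {3,3,6,6}
     {6,9,9,9} quoted above, before the out-of-range fix.  */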

  for (i = 0;
       VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (slp_node_instance),
                    i, node);
       i++)
    {
      scalar_index = 0;
      index = 0;
      vect_stmts_counter = 0;
      vec_index = 0;
      first_vec_index = vec_index++;
      if (only_one_vec)
        second_vec_index = first_vec_index;
      else
        second_vec_index = vec_index++;

      for (j = 0; j < unroll_factor; j++)
        {
          for (k = 0; k < group_size; k++)
            {
              first_mask_element = (i + j * group_size) * scale;
              for (m = 0; m < scale; m++)
                {
                  if (!vect_get_mask_element (stmt, first_mask_element, m,
                                              mask_nunits, only_one_vec,
                                              index, mask,
                                              &current_mask_element,
                                              &need_next_vector))
                    return false;

                  mask[index++] = current_mask_element;
                }

              /* The mask is complete - build the vector constant and check
                 that the target supports this permutation.  */
              if (index == mask_nunits)
                {
                  tree mask_vec = NULL;

                  while (--index >= 0)
                    {
                      tree t = build_int_cst (mask_element_type, mask[index]);
                      mask_vec = tree_cons (NULL, t, mask_vec);
                    }
                  mask_vec = build_vector (mask_type, mask_vec);
                  index = 0;

                  if (!targetm.vectorize.builtin_vec_perm_ok (vectype,
                                                              mask_vec))
                    {
                      if (vect_print_dump_info (REPORT_DETAILS))
                        {
                          fprintf (vect_dump, "unsupported vect permute ");
                          print_generic_expr (vect_dump, mask_vec, 0);
                        }
                      free (mask);
                      return false;
                    }

                  if (!analyze_only)
                    {
                      if (need_next_vector)
                        {
                          first_vec_index = second_vec_index;
                          second_vec_index = vec_index;
                        }

                      next_scalar_stmt = VEC_index (gimple,
                          SLP_TREE_SCALAR_STMTS (node), scalar_index++);

                      vect_create_mask_and_perm (stmt, next_scalar_stmt,
                          mask_vec, first_vec_index, second_vec_index,
                          gsi, node, builtin_decl, vectype, dr_chain,
                          ncopies, vect_stmts_counter++);
                    }
                }
            }
        }
    }

  free (mask);
  return true;
}

/* Vectorize SLP instance tree in postorder, so that the vectorized defs of
   the operands exist before the statements that use them are transformed.  */

static bool
vect_schedule_slp_instance (slp_tree node, slp_instance instance,
                            unsigned int vectorization_factor)
{
  gimple stmt;
  bool strided_store, is_store;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_info;
  unsigned int vec_stmts_size, nunits, group_size;
  tree vectype;
  int i;
  slp_tree loads_node;

  if (!node)
    return false;

  vect_schedule_slp_instance (SLP_TREE_LEFT (node), instance,
                              vectorization_factor);
  vect_schedule_slp_instance (SLP_TREE_RIGHT (node), instance,
                              vectorization_factor);

  stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (node), 0);
  stmt_info = vinfo_for_stmt (stmt);

  /* VECTYPE is the type of the destination.  */
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = (unsigned int) TYPE_VECTOR_SUBPARTS (vectype);
  group_size = SLP_INSTANCE_GROUP_SIZE (instance);

  /* For each SLP instance calculate the number of vector stmts to be created
     for the scalar stmts in each node of the SLP tree.  The number of vector
     elements in one vector iteration is the number of scalar elements in
     one scalar iteration (GROUP_SIZE) multiplied by VF divided by the vector
     size.  */
  vec_stmts_size = (vectorization_factor * group_size) / nunits;
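
  /* For example (hypothetical numbers): a group of two scalar stmts
     (GROUP_SIZE == 2), a vectorization factor of 4 and four elements per
     vector (NUNITS == 4) require (4 * 2) / 4 == 2 vector stmts per node.  */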

  /* In case of load permutation we have to allocate vectorized statements for
     all the nodes that participate in that permutation.  */
  if (SLP_INSTANCE_LOAD_PERMUTATION (instance))
    {
      for (i = 0;
           VEC_iterate (slp_tree, SLP_INSTANCE_LOADS (instance), i,
                        loads_node);
           i++)
        {
          if (!SLP_TREE_VEC_STMTS (loads_node))
            {
              SLP_TREE_VEC_STMTS (loads_node) = VEC_alloc (gimple, heap,
                                                           vec_stmts_size);
              SLP_TREE_NUMBER_OF_VEC_STMTS (loads_node) = vec_stmts_size;
            }
        }
    }

  if (!SLP_TREE_VEC_STMTS (node))
    {
      SLP_TREE_VEC_STMTS (node) = VEC_alloc (gimple, heap, vec_stmts_size);
      SLP_TREE_NUMBER_OF_VEC_STMTS (node) = vec_stmts_size;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "------>vectorizing SLP node starting from: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  /* Vectorized loads should be inserted before the first scalar load of
     the group.  */
  if (SLP_INSTANCE_FIRST_LOAD_STMT (instance)
      && STMT_VINFO_STRIDED_ACCESS (stmt_info)
      && !REFERENCE_CLASS_P (gimple_get_lhs (stmt)))
    si = gsi_for_stmt (SLP_INSTANCE_FIRST_LOAD_STMT (instance));
  else
    si = gsi_for_stmt (stmt);

  is_store = vect_transform_stmt (stmt, &si, &strided_store, node, instance);
  return is_store;
}

/* Generate vector code for all the SLP instances in the loop (when
   LOOP_VINFO is given) or in the basic block (when BB_VINFO is given).  */

bool
vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  VEC (slp_instance, heap) *slp_instances;
  slp_instance instance;
  unsigned int i, vf;
  bool is_store = false;

  if (loop_vinfo)
    {
      slp_instances = LOOP_VINFO_SLP_INSTANCES (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    {
      slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);
      vf = 1;
    }

  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      /* Schedule the tree of INSTANCE.  */
      is_store = vect_schedule_slp_instance (SLP_INSTANCE_TREE (instance),
                                             instance, vf);
      if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS)
          || vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "vectorizing stmts using SLP.");
    }

  /* Remove the scalar stores that were replaced by the vectorized
     statements.  */
  for (i = 0; VEC_iterate (slp_instance, slp_instances, i, instance); i++)
    {
      slp_tree root = SLP_INSTANCE_TREE (instance);
      gimple store;
      unsigned int j;
      gimple_stmt_iterator gsi;

      for (j = 0;
           VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (root), j, store)
           && j < SLP_INSTANCE_GROUP_SIZE (instance);
           j++)
        {
          if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (store)))
            break;

          /* Free the attached stmt_vec_info and remove the stmt.  */
          gsi = gsi_for_stmt (store);
          gsi_remove (&gsi, true);
          free_stmt_vec_info (store);
        }
    }

  return is_store;
}

/* Vectorize the basic block.  */

void
vect_slp_transform_bb (basic_block bb)
{
  bb_vec_info bb_vinfo = vec_info_for_bb (bb);
  gimple_stmt_iterator si;

  gcc_assert (bb_vinfo);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "SLPing BB\n");

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple stmt = gsi_stmt (si);
      stmt_vec_info stmt_info;

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "------>SLPing statement: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      stmt_info = vinfo_for_stmt (stmt);
      gcc_assert (stmt_info);

      /* Schedule all the SLP instances when the first SLP stmt is
         reached.  */
      if (STMT_SLP_TYPE (stmt_info))
        {
          vect_schedule_slp (NULL, bb_vinfo);
          break;
        }
    }

  mark_sym_for_renaming (gimple_vop (cfun));
  /* The memory tags and pointers in vectorized statements need to
     have their SSA forms updated.  FIXME, why can't this be delayed
     until all the loops have been transformed?  */
  update_ssa (TODO_update_ssa);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "BASIC BLOCK VECTORIZED\n");

  destroy_bb_vec_info (bb_vinfo);
}