/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
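/* An illustrative example (hypothetical loop, added here for exposition
   only):

     for (i = 0; i < n; i++)
       {
         sum = sum + a[i];     <-- result is used after the loop
         b[i] = x;             <-- has a vdef (alters memory)
       }
     ... = sum;

   Both statements are relevant: the first because SUM is live outside
   the loop, the second because it writes memory.  */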
static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form).  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */
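/* An illustrative example (for exposition only): in a statement like
   "a[i_5] = x_3" the use of i_5 only feeds the address computation of
   the array reference, so no vectorized def is needed for it, whereas
   the use of x_3 is the stored value and does need to be vectorized.  */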
static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */
bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the liveness/relevance as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
        case vect_reduction_def:
          switch (tmp_relevant)
            {
            case vect_unused_in_scope:
              relevant = vect_used_by_reduction;
              break;

            case vect_used_by_reduction:
              if (gimple_code (stmt) == GIMPLE_PHI)
                break;
              /* fall through */

            default:
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_nested_cycle:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_in_outer_by_reduction
              && tmp_relevant != vect_used_in_outer)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of nested cycle.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        case vect_double_reduction_def:
          if (tmp_relevant != vect_unused_in_scope
              && tmp_relevant != vect_used_by_reduction)
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "unsupported use of double reduction.");

              VEC_free (gimple, heap, worklist);
              return false;
            }

          live_p = false;
          break;

        default:
          break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}
/* Get cost by calling cost target builtin.  */

static inline
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}
static int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return vect_get_stmt_cost (scalar_load);
    case store_vec_info_type:
      return vect_get_stmt_cost (scalar_store);
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return vect_get_stmt_cost (scalar_stmt);
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */
void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */
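/* An illustrative example (for exposition only): for an interleaved
   group of four loads a[4*i], a[4*i+1], a[4*i+2], a[4*i+3], the call
   for the first load of the group returns 4 and the calls for the
   other three loads return 1, so the interleaving overhead in the
   cost models below is added exactly once per group.  */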
static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */
void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          group_size = 1;
        }
      else
        {
          first_stmt = DR_GROUP_FIRST_DR (stmt_info);
          group_size = vect_cost_strided_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
        * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: aligned.");

        break;
      }

    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
                                                vectype, DR_MISALIGNMENT (dr));

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
                   "hardware.");

        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */
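/* An illustrative cost breakdown (for exposition only): a strided group
   of size 4 with ncopies == 1 is charged 1 * log2 (4) * 4 = 8
   vector_stmt permute operations below, plus the cost of the vector
   loads themselves as computed by vect_get_load_cost.  */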
void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
        * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
         ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
         &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
                                                vectype, DR_MISALIGNMENT (dr));
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += vect_get_stmt_cost (vector_stmt);

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost)
          {
            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
            if (targetm.vectorize.builtin_mask_for_load)
              *outside_cost += vect_get_stmt_cost (vector_stmt);
          }

        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */
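/* An illustrative example (for exposition only): if OP is the constant 3
   and the vector type has four elements, a new statement
       vect_cst_.N = { 3, 3, 3, 3 };
   is created via vect_init_vector and its LHS is returned.  If OP is an
   SSA_NAME defined inside the loop by a statement that has already been
   vectorized, the LHS of that vectorized statement is returned instead.  */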
tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def =  ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, "  def_stmt =  ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, op, t);
          }
        vec_cst = build_vector (vector_type, t);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt && gimple_code (vec_stmt) == GIMPLE_PHI);
        vec_oprnd = PHI_RESULT (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */
tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */
void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */
void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */
void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */
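/* An illustrative example (for exposition only): a call "y = sqrtf (x)"
   can be vectorized when the target's builtin_vectorized_function hook
   maps the sqrtf builtin to a vector builtin taking VECTYPE_IN and
   producing VECTYPE_OUT; if the hook returns NULL_TREE the call is not
   vectorized.  */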
tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than two arguments, we
     do not have interesting builtin functions to vectorize with
     more than two arguments.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 2)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  fold_convert (type, integer_zero_node));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, j;
  tree rhs_type;
  tree builtin_decl;
  enum { NARROW, NONE, WIDEN } modifier;
  int i;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  tree vop0;
  VEC(tree,heap) *dummy = NULL;
  int dummy_int;

  /* Is STMT a vectorizable conversion?   */

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* FORNOW: SLP with multiple types is not supported.  The SLP analysis
     verifies this, so we can safely override NCOPIES with 1 here.  */
  if (slp_node)
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
      || (modifier == WIDEN
          && !supportable_widening_operation (code, stmt,
                                              vectype_out, vectype_in,
                                              &decl1, &decl2,
                                              &code1, &code2,
                                              &dummy_int, &dummy))
      || (modifier == NARROW
          && !supportable_narrowing_operation (code, vectype_out, vectype_in,
                                               &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }

  if (modifier != NONE)
    {
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          builtin_decl =
            targetm.vectorize.builtin_conversion (code,
                                                  vectype_out, vectype_in);
          FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
            {
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_call (builtin_decl, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
          else
            vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

          /* Generate first half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code1, decl1,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);

          /* Generate second half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code2, decl2,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }

  if (vec_oprnds0)
    VEC_free (tree, heap, vec_oprnds0);

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  VEC(tree,heap) *vec_oprnds = NULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if (CONVERT_EXPR_CODE_P (code)
      && (!vectype_in
          || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
          || (GET_MODE_SIZE (TYPE_MODE (vectype))
              != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_assignment ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform assignment.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
        {
          if (CONVERT_EXPR_CODE_P (code))
            vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  VEC_free (tree, heap, vec_oprnds);
  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary or unary operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
2042 vectorizable_operation (gimple stmt
, gimple_stmt_iterator
*gsi
,
2043 gimple
*vec_stmt
, slp_tree slp_node
)
2047 tree op0
, op1
= NULL
;
2048 tree vec_oprnd1
= NULL_TREE
;
2049 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2051 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2052 enum tree_code code
;
2053 enum machine_mode vec_mode
;
2058 enum machine_mode optab_op2_mode
;
2061 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
2062 gimple new_stmt
= NULL
;
2063 stmt_vec_info prev_stmt_info
;
2069 VEC(tree
,heap
) *vec_oprnds0
= NULL
, *vec_oprnds1
= NULL
;
2072 bool scalar_shift_arg
= false;
2073 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2076 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2079 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2082 /* Is STMT a vectorizable binary/unary operation? */
2083 if (!is_gimple_assign (stmt
))
2086 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
2089 code
= gimple_assign_rhs_code (stmt
);
2091 /* For pointer addition, we should use the normal plus for
2092 the vector addition. */
2093 if (code
== POINTER_PLUS_EXPR
)
2096 /* Support only unary or binary operations. */
2097 op_type
= TREE_CODE_LENGTH (code
);
2098 if (op_type
!= unary_op
&& op_type
!= binary_op
)
2100 if (vect_print_dump_info (REPORT_DETAILS
))
2101 fprintf (vect_dump
, "num. args = %d (not unary/binary op).", op_type
);
2105 scalar_dest
= gimple_assign_lhs (stmt
);
2106 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2108 op0
= gimple_assign_rhs1 (stmt
);
2109 if (!vect_is_simple_use_1 (op0
, loop_vinfo
, bb_vinfo
,
2110 &def_stmt
, &def
, &dt
[0], &vectype
))
2112 if (vect_print_dump_info (REPORT_DETAILS
))
2113 fprintf (vect_dump
, "use not simple.");
2116 /* If op0 is an external or constant def use a vector type with
2117 the same size as the output vector type. */
2119 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
2121 gcc_assert (vectype
);
2124 if (vect_print_dump_info (REPORT_DETAILS
))
2126 fprintf (vect_dump
, "no vectype for scalar type ");
2127 print_generic_expr (vect_dump
, TREE_TYPE (op0
), TDF_SLIM
);
2133 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2134 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
2135 if (nunits_out
!= nunits_in
)
2138 if (op_type
== binary_op
)
2140 op1
= gimple_assign_rhs2 (stmt
);
2141 if (!vect_is_simple_use (op1
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
,
2144 if (vect_print_dump_info (REPORT_DETAILS
))
2145 fprintf (vect_dump
, "use not simple.");
2151 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
2161 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
2163 gcc_assert (ncopies
>= 1);
  /* If this is a shift/rotate, determine whether the shift amount is a vector,
     or scalar.  If the shift/rotate amount is a vector, use the vector/vector
     shift optabs.  */
2168 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
2169 || code
== RROTATE_EXPR
)
2171 /* vector shifted by vector */
2172 if (dt
[1] == vect_internal_def
)
2174 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
2175 if (vect_print_dump_info (REPORT_DETAILS
))
2176 fprintf (vect_dump
, "vector/vector shift/rotate found.");
2179 /* See if the machine has a vector shifted by scalar insn and if not
2180 then see if it has a vector shifted by vector insn */
2181 else if (dt
[1] == vect_constant_def
|| dt
[1] == vect_external_def
)
2183 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
2185 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
2187 scalar_shift_arg
= true;
2188 if (vect_print_dump_info (REPORT_DETAILS
))
2189 fprintf (vect_dump
, "vector/scalar shift/rotate found.");
2193 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
2195 && (optab_handler (optab
, TYPE_MODE (vectype
))
2196 != CODE_FOR_nothing
))
2198 if (vect_print_dump_info (REPORT_DETAILS
))
2199 fprintf (vect_dump
, "vector/vector shift/rotate found.");
2201 /* Unlike the other binary operators, shifts/rotates have
2202 the rhs being int, instead of the same type as the lhs,
2203 so make sure the scalar is the right type if we are
2204 dealing with vectors of short/char. */
2205 if (dt
[1] == vect_constant_def
)
2206 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
2213 if (vect_print_dump_info (REPORT_DETAILS
))
2214 fprintf (vect_dump
, "operand mode requires invariant argument.");
2219 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
2221 /* Supportable by target? */
2224 if (vect_print_dump_info (REPORT_DETAILS
))
2225 fprintf (vect_dump
, "no optab.");
2228 vec_mode
= TYPE_MODE (vectype
);
2229 icode
= (int) optab_handler (optab
, vec_mode
);
2230 if (icode
== CODE_FOR_nothing
)
2232 if (vect_print_dump_info (REPORT_DETAILS
))
2233 fprintf (vect_dump
, "op not supported by target.");
2234 /* Check only during analysis. */
2235 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
2236 || (vf
< vect_min_worthwhile_factor (code
)
2239 if (vect_print_dump_info (REPORT_DETAILS
))
2240 fprintf (vect_dump
, "proceeding using word mode.");
2243 /* Worthwhile without SIMD support? Check only during analysis. */
2244 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
2245 && vf
< vect_min_worthwhile_factor (code
)
2248 if (vect_print_dump_info (REPORT_DETAILS
))
2249 fprintf (vect_dump
, "not worthwhile without SIMD support.");
2253 if (!vec_stmt
) /* transformation not required. */
2255 STMT_VINFO_TYPE (stmt_info
) = op_vec_info_type
;
2256 if (vect_print_dump_info (REPORT_DETAILS
))
2257 fprintf (vect_dump
, "=== vectorizable_operation ===");
2258 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
);
2264 if (vect_print_dump_info (REPORT_DETAILS
))
2265 fprintf (vect_dump
, "transform binary/unary operation.");
2268 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
2270 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2271 created in the previous stages of the recursion, so no allocation is
2272 needed, except for the case of shift with scalar shift argument. In that
2273 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2274 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2275 In case of loop-based vectorization we allocate VECs of size 1. We
2276 allocate VEC_OPRNDS1 only in case of binary operation. */
2279 vec_oprnds0
= VEC_alloc (tree
, heap
, 1);
2280 if (op_type
== binary_op
)
2281 vec_oprnds1
= VEC_alloc (tree
, heap
, 1);
2283 else if (scalar_shift_arg
)
2284 vec_oprnds1
= VEC_alloc (tree
, heap
, slp_node
->vec_stmts_size
);
2286 /* In case the vectorization factor (VF) is bigger than the number
2287 of elements that we can fit in a vectype (nunits), we have to generate
2288 more than one vector stmt - i.e - we need to "unroll" the
2289 vector stmt by a factor VF/nunits. In doing so, we record a pointer
2290 from one copy of the vector stmt to the next, in the field
2291 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
2292 stages to find the correct vector defs to be used when vectorizing
2293 stmts that use the defs of the current stmt. The example below illustrates
2294 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
2295 4 vectorized stmts):
2297 before vectorization:
2298 RELATED_STMT VEC_STMT
2302 step 1: vectorize stmt S1 (done in vectorizable_load. See more details
2304 RELATED_STMT VEC_STMT
2305 VS1_0: vx0 = memref0 VS1_1 -
2306 VS1_1: vx1 = memref1 VS1_2 -
2307 VS1_2: vx2 = memref2 VS1_3 -
2308 VS1_3: vx3 = memref3 - -
2309 S1: x = load - VS1_0
2312 step2: vectorize stmt S2 (done here):
2313 To vectorize stmt S2 we first need to find the relevant vector
2314 def for the first operand 'x'. This is, as usual, obtained from
2315 the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
2316 that defines 'x' (S1). This way we find the stmt VS1_0, and the
2317 relevant vector def 'vx0'. Having found 'vx0' we can generate
2318 the vector stmt VS2_0, and as usual, record it in the
2319 STMT_VINFO_VEC_STMT of stmt S2.
2320 When creating the second copy (VS2_1), we obtain the relevant vector
2321 def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
2322 stmt VS1_0. This way we find the stmt VS1_1 and the relevant
2323 vector def 'vx1'. Using 'vx1' we create stmt VS2_1 and record a
2324 pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
2325 Similarly when creating stmts VS2_2 and VS2_3. This is the resulting
2326 chain of stmts and pointers:
2327 RELATED_STMT VEC_STMT
2328 VS1_0: vx0 = memref0 VS1_1 -
2329 VS1_1: vx1 = memref1 VS1_2 -
2330 VS1_2: vx2 = memref2 VS1_3 -
2331 VS1_3: vx3 = memref3 - -
2332 S1: x = load - VS1_0
2333 VS2_0: vz0 = vx0 + v1 VS2_1 -
2334 VS2_1: vz1 = vx1 + v1 VS2_2 -
2335 VS2_2: vz2 = vx2 + v1 VS2_3 -
2336 VS2_3: vz3 = vx3 + v1 - -
2337 S2: z = x + 1 - VS2_0 */
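/* Illustrative sketch (not part of the vectorizer, kept under #if 0): a
   hand-written analogue of the unrolling described above, assuming GCC's
   generic vector extensions.  With VF = 16 and 4-element vectors, each
   vectorized iteration issues ncopies = VF / nunits = 4 copies of the
   vector stmt, mirroring the VS2_0 .. VS2_3 chain.  The function name is
   made up for the example.  */
#if 0
typedef int v4si __attribute__ ((vector_size (16)));

void
add_one_unrolled (const int *x, int *z, int n)
{
  int i;
  v4si one = { 1, 1, 1, 1 };
  for (i = 0; i + 16 <= n; i += 16)
    {
      /* Four copies of the vector stmt per iteration (VS2_0 .. VS2_3).  */
      *(v4si *) (z + i)      = *(const v4si *) (x + i)      + one;
      *(v4si *) (z + i + 4)  = *(const v4si *) (x + i + 4)  + one;
      *(v4si *) (z + i + 8)  = *(const v4si *) (x + i + 8)  + one;
      *(v4si *) (z + i + 12) = *(const v4si *) (x + i + 12) + one;
    }
  for (; i < n; i++)  /* Scalar epilogue for the remainder.  */
    z[i] = x[i] + 1;
}
#endif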
2339 prev_stmt_info
= NULL
;
2340 for (j
= 0; j
< ncopies
; j
++)
2345 if (op_type
== binary_op
&& scalar_shift_arg
)
2347 /* Vector shl and shr insn patterns can be defined with scalar
2348 operand 2 (shift operand). In this case, use constant or loop
2349 invariant op1 directly, without extending it to vector mode
2351 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
2352 if (!VECTOR_MODE_P (optab_op2_mode
))
2354 if (vect_print_dump_info (REPORT_DETAILS
))
2355 fprintf (vect_dump
, "operand 1 using scalar mode.");
2357 VEC_quick_push (tree
, vec_oprnds1
, vec_oprnd1
);
2360 /* Store vec_oprnd1 for every vector stmt to be created
2361 for SLP_NODE. We check during the analysis that all the
2362 shift arguments are the same.
2363 TODO: Allow different constants for different vector
2364 stmts generated for an SLP instance. */
2365 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
2366 VEC_quick_push (tree
, vec_oprnds1
, vec_oprnd1
);
2371 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2372 (a special case for certain kind of vector shifts); otherwise,
2373 operand 1 should be of a vector type (the usual case). */
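/* Illustrative sketch (not part of the vectorizer, kept under #if 0): the
   two shift forms that the optab selection above distinguishes.  A
   loop-invariant amount can use a vector/scalar shift insn; a per-element
   amount needs the vector/vector form.  The function names are made up for
   the example.  */
#if 0
void
shift_by_scalar (int *x, int amount, int n)
{
  int i;
  for (i = 0; i < n; i++)
    x[i] <<= amount;          /* Shift amount is a scalar / loop invariant.  */
}

void
shift_by_vector (int *x, const int *amount, int n)
{
  int i;
  for (i = 0; i < n; i++)
    x[i] <<= amount[i];       /* Shift amount varies per element.  */
}
#endif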
2374 if (op_type
== binary_op
&& !vec_oprnd1
)
2375 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
2378 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
2382 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
2384 /* Arguments are ready. Create the new vector stmt. */
2385 FOR_EACH_VEC_ELT (tree
, vec_oprnds0
, i
, vop0
)
2387 vop1
= ((op_type
== binary_op
)
2388 ? VEC_index (tree
, vec_oprnds1
, i
) : NULL
);
2389 new_stmt
= gimple_build_assign_with_ops (code
, vec_dest
, vop0
, vop1
);
2390 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2391 gimple_assign_set_lhs (new_stmt
, new_temp
);
2392 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2394 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt
);
2401 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2403 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2404 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2407 VEC_free (tree
, heap
, vec_oprnds0
);
2409 VEC_free (tree
, heap
, vec_oprnds1
);
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
                          VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
2428 /* Get first vector operand. */
2429 /* All the vector operands except the very first one (that is scalar oprnd)
2431 if (TREE_CODE (TREE_TYPE (*oprnd
)) != VECTOR_TYPE
)
2432 vec_oprnd
= vect_get_vec_def_for_operand (*oprnd
, stmt
, NULL
);
2434 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, *oprnd
);
2436 VEC_quick_push (tree
, *vec_oprnds
, vec_oprnd
);
2438 /* Get second vector operand. */
2439 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
2440 VEC_quick_push (tree
, *vec_oprnds
, vec_oprnd
);
2444 /* For conversion in multiple steps, continue to get operands
2447 vect_get_loop_based_defs (oprnd
, stmt
, dt
, vec_oprnds
, multi_step_cvt
- 1);
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
                                       int multi_step_cvt, gimple stmt,
                                       VEC (tree, heap) *vec_dsts,
                                       gimple_stmt_iterator *gsi,
                                       slp_tree slp_node, enum tree_code code,
                                       stmt_vec_info *prev_stmt_info)
2464 tree vop0
, vop1
, new_tmp
, vec_dest
;
2466 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2468 vec_dest
= VEC_pop (tree
, vec_dsts
);
2470 for (i
= 0; i
< VEC_length (tree
, *vec_oprnds
); i
+= 2)
2472 /* Create demotion operation. */
2473 vop0
= VEC_index (tree
, *vec_oprnds
, i
);
2474 vop1
= VEC_index (tree
, *vec_oprnds
, i
+ 1);
2475 new_stmt
= gimple_build_assign_with_ops (code
, vec_dest
, vop0
, vop1
);
2476 new_tmp
= make_ssa_name (vec_dest
, new_stmt
);
2477 gimple_assign_set_lhs (new_stmt
, new_tmp
);
2478 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2481 /* Store the resulting vector for next recursive call. */
2482 VEC_replace (tree
, *vec_oprnds
, i
/2, new_tmp
);
2485 /* This is the last step of the conversion sequence. Store the
2486 vectors in SLP_NODE or in vector info of the scalar statement
2487 (or in STMT_VINFO_RELATED_STMT chain). */
2489 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt
);
2492 if (!*prev_stmt_info
)
2493 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
2495 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt
;
2497 *prev_stmt_info
= vinfo_for_stmt (new_stmt
);
  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in a demotion operation to the destination
     type.  */

  /* At each level of recursion we have half of the operands we had at the
     previous level.  */
2510 VEC_truncate (tree
, *vec_oprnds
, (i
+1)/2);
2511 vect_create_vectorized_demotion_stmts (vec_oprnds
, multi_step_cvt
- 1,
2512 stmt
, vec_dsts
, gsi
, slp_node
,
2513 code
, prev_stmt_info
);
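/* Illustrative sketch (not part of the vectorizer, kept under #if 0): a
   scalar analogue of the multi-step demotion described above, using a
   made-up example that narrows int to signed char through the intermediate
   type short.  */
#if 0
void
demote_two_step (const int *in, signed char *out, int n)
{
  short tmp[16];
  int i, k;
  for (i = 0; i + 16 <= n; i += 16)
    {
      for (k = 0; k < 16; k++)   /* Step 1: source type -> intermediate type.  */
        tmp[k] = (short) in[i + k];
      for (k = 0; k < 16; k++)   /* Step 2: intermediate type -> destination type.  */
        out[i + k] = (signed char) tmp[k];
    }
}
#endif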
/* Function vectorizable_type_demotion

   Check if STMT performs a binary or unary operation that involves
   type demotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
                            gimple *vec_stmt, slp_tree slp_node)
2533 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2534 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2535 enum tree_code code
, code1
= ERROR_MARK
;
2538 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
2539 stmt_vec_info prev_stmt_info
;
2546 int multi_step_cvt
= 0;
2547 VEC (tree
, heap
) *vec_oprnds0
= NULL
;
2548 VEC (tree
, heap
) *vec_dsts
= NULL
, *interm_types
= NULL
, *tmp_vec_dsts
= NULL
;
2549 tree last_oprnd
, intermediate_type
;
2551 /* FORNOW: not supported by basic block SLP vectorization. */
2552 gcc_assert (loop_vinfo
);
2554 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
2557 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2560 /* Is STMT a vectorizable type-demotion operation? */
2561 if (!is_gimple_assign (stmt
))
2564 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
2567 code
= gimple_assign_rhs_code (stmt
);
2568 if (!CONVERT_EXPR_CODE_P (code
))
2571 scalar_dest
= gimple_assign_lhs (stmt
);
2572 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2574 /* Check the operands of the operation. */
2575 op0
= gimple_assign_rhs1 (stmt
);
2576 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
2577 && INTEGRAL_TYPE_P (TREE_TYPE (op0
)))
2578 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest
))
2579 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0
))
2580 && CONVERT_EXPR_CODE_P (code
))))
2582 if (!vect_is_simple_use_1 (op0
, loop_vinfo
, NULL
,
2583 &def_stmt
, &def
, &dt
[0], &vectype_in
))
2585 if (vect_print_dump_info (REPORT_DETAILS
))
2586 fprintf (vect_dump
, "use not simple.");
2589 /* If op0 is an external def use a vector type with the
2590 same size as the output vector type if possible. */
2592 vectype_in
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
2594 gcc_assert (vectype_in
);
2597 if (vect_print_dump_info (REPORT_DETAILS
))
2599 fprintf (vect_dump
, "no vectype for scalar type ");
2600 print_generic_expr (vect_dump
, TREE_TYPE (op0
), TDF_SLIM
);
2606 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
2607 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2608 if (nunits_in
>= nunits_out
)
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
2617 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_out
;
2618 gcc_assert (ncopies
>= 1);
2620 /* Supportable by target? */
2621 if (!supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
2622 &code1
, &multi_step_cvt
, &interm_types
))
2625 if (!vec_stmt
) /* transformation not required. */
2627 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
2628 if (vect_print_dump_info (REPORT_DETAILS
))
2629 fprintf (vect_dump
, "=== vectorizable_demotion ===");
2630 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
);
2635 if (vect_print_dump_info (REPORT_DETAILS
))
2636 fprintf (vect_dump
, "transform type demotion operation. ncopies = %d.",
  /* In case of multi-step demotion, we first generate demotion operations to
     the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_narrowing_operation, and store them in the correct order
     for future use in vect_create_vectorized_demotion_stmts().  */
2645 vec_dsts
= VEC_alloc (tree
, heap
, multi_step_cvt
+ 1);
2647 vec_dsts
= VEC_alloc (tree
, heap
, 1);
2649 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
2650 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
2654 for (i
= VEC_length (tree
, interm_types
) - 1;
2655 VEC_iterate (tree
, interm_types
, i
, intermediate_type
); i
--)
2657 vec_dest
= vect_create_destination_var (scalar_dest
,
2659 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
2663 /* In case the vectorization factor (VF) is bigger than the number
2664 of elements that we can fit in a vectype (nunits), we have to generate
2665 more than one vector stmt - i.e - we need to "unroll" the
2666 vector stmt by a factor VF/nunits. */
2668 prev_stmt_info
= NULL
;
2669 for (j
= 0; j
< ncopies
; j
++)
2673 vect_get_slp_defs (slp_node
, &vec_oprnds0
, NULL
, -1);
2676 VEC_free (tree
, heap
, vec_oprnds0
);
2677 vec_oprnds0
= VEC_alloc (tree
, heap
,
2678 (multi_step_cvt
? vect_pow2 (multi_step_cvt
) * 2 : 2));
2679 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
2680 vect_pow2 (multi_step_cvt
) - 1);
2683 /* Arguments are ready. Create the new vector stmts. */
2684 tmp_vec_dsts
= VEC_copy (tree
, heap
, vec_dsts
);
2685 vect_create_vectorized_demotion_stmts (&vec_oprnds0
,
2686 multi_step_cvt
, stmt
, tmp_vec_dsts
,
2687 gsi
, slp_node
, code1
,
2691 VEC_free (tree
, heap
, vec_oprnds0
);
2692 VEC_free (tree
, heap
, vec_dsts
);
2693 VEC_free (tree
, heap
, tmp_vec_dsts
);
2694 VEC_free (tree
, heap
, interm_types
);
2696 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
                                        VEC (tree, heap) **vec_oprnds1,
                                        int multi_step_cvt, gimple stmt,
                                        VEC (tree, heap) *vec_dsts,
                                        gimple_stmt_iterator *gsi,
                                        slp_tree slp_node, enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type,
                                        stmt_vec_info *prev_stmt_info)
2717 tree vop0
, vop1
, new_tmp1
, new_tmp2
, vec_dest
;
2718 gimple new_stmt1
, new_stmt2
;
2719 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2720 VEC (tree
, heap
) *vec_tmp
;
2722 vec_dest
= VEC_pop (tree
, vec_dsts
);
2723 vec_tmp
= VEC_alloc (tree
, heap
, VEC_length (tree
, *vec_oprnds0
) * 2);
2725 FOR_EACH_VEC_ELT (tree
, *vec_oprnds0
, i
, vop0
)
2727 if (op_type
== binary_op
)
2728 vop1
= VEC_index (tree
, *vec_oprnds1
, i
);
2732 /* Generate the two halves of promotion operation. */
2733 new_stmt1
= vect_gen_widened_results_half (code1
, decl1
, vop0
, vop1
,
2734 op_type
, vec_dest
, gsi
, stmt
);
2735 new_stmt2
= vect_gen_widened_results_half (code2
, decl2
, vop0
, vop1
,
2736 op_type
, vec_dest
, gsi
, stmt
);
2737 if (is_gimple_call (new_stmt1
))
2739 new_tmp1
= gimple_call_lhs (new_stmt1
);
2740 new_tmp2
= gimple_call_lhs (new_stmt2
);
2744 new_tmp1
= gimple_assign_lhs (new_stmt1
);
2745 new_tmp2
= gimple_assign_lhs (new_stmt2
);
2750 /* Store the results for the recursive call. */
2751 VEC_quick_push (tree
, vec_tmp
, new_tmp1
);
2752 VEC_quick_push (tree
, vec_tmp
, new_tmp2
);
      /* Last step of the promotion sequence - store the results.  */
2759 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt1
);
2760 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt2
);
2764 if (!*prev_stmt_info
)
2765 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt1
;
2767 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt1
;
2769 *prev_stmt_info
= vinfo_for_stmt (new_stmt1
);
2770 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt2
;
2771 *prev_stmt_info
= vinfo_for_stmt (new_stmt2
);
      /* For a multi-step promotion operation we call the function recursively
         for every stage.  We start from the input type, create promotion
         operations to the intermediate types, and then create promotions
         to the output type.  */
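/* Illustrative sketch (not part of the vectorizer, kept under #if 0): a
   scalar analogue of the multi-step promotion described above, using a
   made-up example that widens signed char to int through the intermediate
   type short.  */
#if 0
void
promote_two_step (const signed char *in, int *out, int n)
{
  short tmp[16];
  int i, k;
  for (i = 0; i + 16 <= n; i += 16)
    {
      for (k = 0; k < 16; k++)   /* Step 1: input type -> intermediate type.  */
        tmp[k] = (short) in[i + k];
      for (k = 0; k < 16; k++)   /* Step 2: intermediate type -> output type.  */
        out[i + k] = (int) tmp[k];
    }
}
#endif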
2782 *vec_oprnds0
= VEC_copy (tree
, heap
, vec_tmp
);
2783 VEC_free (tree
, heap
, vec_tmp
);
2784 vect_create_vectorized_promotion_stmts (vec_oprnds0
, vec_oprnds1
,
2785 multi_step_cvt
- 1, stmt
,
2786 vec_dsts
, gsi
, slp_node
, code1
,
2787 code2
, decl2
, decl2
, op_type
,
/* Function vectorizable_type_promotion

   Check if STMT performs a binary or unary operation that involves
   type promotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
                             gimple *vec_stmt, slp_tree slp_node)
2807 tree op0
, op1
= NULL
;
2808 tree vec_oprnd0
=NULL
, vec_oprnd1
=NULL
;
2809 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2810 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2811 enum tree_code code
, code1
= ERROR_MARK
, code2
= ERROR_MARK
;
2812 tree decl1
= NULL_TREE
, decl2
= NULL_TREE
;
2816 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
2817 stmt_vec_info prev_stmt_info
;
2824 tree intermediate_type
= NULL_TREE
;
2825 int multi_step_cvt
= 0;
2826 VEC (tree
, heap
) *vec_oprnds0
= NULL
, *vec_oprnds1
= NULL
;
2827 VEC (tree
, heap
) *vec_dsts
= NULL
, *interm_types
= NULL
, *tmp_vec_dsts
= NULL
;
2829 /* FORNOW: not supported by basic block SLP vectorization. */
2830 gcc_assert (loop_vinfo
);
2832 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
2835 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2838 /* Is STMT a vectorizable type-promotion operation? */
2839 if (!is_gimple_assign (stmt
))
2842 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
2845 code
= gimple_assign_rhs_code (stmt
);
2846 if (!CONVERT_EXPR_CODE_P (code
)
2847 && code
!= WIDEN_MULT_EXPR
)
2850 scalar_dest
= gimple_assign_lhs (stmt
);
2851 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2853 /* Check the operands of the operation. */
2854 op0
= gimple_assign_rhs1 (stmt
);
2855 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
2856 && INTEGRAL_TYPE_P (TREE_TYPE (op0
)))
2857 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest
))
2858 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0
))
2859 && CONVERT_EXPR_CODE_P (code
))))
2861 if (!vect_is_simple_use_1 (op0
, loop_vinfo
, NULL
,
2862 &def_stmt
, &def
, &dt
[0], &vectype_in
))
2864 if (vect_print_dump_info (REPORT_DETAILS
))
2865 fprintf (vect_dump
, "use not simple.");
2868 /* If op0 is an external or constant def use a vector type with
2869 the same size as the output vector type. */
2871 vectype_in
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
2873 gcc_assert (vectype_in
);
2876 if (vect_print_dump_info (REPORT_DETAILS
))
2878 fprintf (vect_dump
, "no vectype for scalar type ");
2879 print_generic_expr (vect_dump
, TREE_TYPE (op0
), TDF_SLIM
);
2885 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
2886 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2887 if (nunits_in
<= nunits_out
)
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
2896 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
2898 gcc_assert (ncopies
>= 1);
2900 op_type
= TREE_CODE_LENGTH (code
);
2901 if (op_type
== binary_op
)
2903 op1
= gimple_assign_rhs2 (stmt
);
2904 if (!vect_is_simple_use (op1
, loop_vinfo
, NULL
, &def_stmt
, &def
, &dt
[1]))
2906 if (vect_print_dump_info (REPORT_DETAILS
))
2907 fprintf (vect_dump
, "use not simple.");
2912 /* Supportable by target? */
2913 if (!supportable_widening_operation (code
, stmt
, vectype_out
, vectype_in
,
2914 &decl1
, &decl2
, &code1
, &code2
,
2915 &multi_step_cvt
, &interm_types
))
2918 /* Binary widening operation can only be supported directly by the
2920 gcc_assert (!(multi_step_cvt
&& op_type
== binary_op
));
2922 if (!vec_stmt
) /* transformation not required. */
2924 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
2925 if (vect_print_dump_info (REPORT_DETAILS
))
2926 fprintf (vect_dump
, "=== vectorizable_promotion ===");
2927 vect_model_simple_cost (stmt_info
, 2*ncopies
, dt
, NULL
);
2933 if (vect_print_dump_info (REPORT_DETAILS
))
2934 fprintf (vect_dump
, "transform type promotion operation. ncopies = %d.",
  /* In case of multi-step promotion, we first generate promotion operations
     to the intermediate types, and then from those types to the final one.
     We store the vector destinations in VEC_DSTS in the correct order for
     recursive creation of promotion operations in
     vect_create_vectorized_promotion_stmts().  Vector destinations are
     created according to TYPES received from
     supportable_widening_operation().  */
2945 vec_dsts
= VEC_alloc (tree
, heap
, multi_step_cvt
+ 1);
2947 vec_dsts
= VEC_alloc (tree
, heap
, 1);
2949 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
2950 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
2954 for (i
= VEC_length (tree
, interm_types
) - 1;
2955 VEC_iterate (tree
, interm_types
, i
, intermediate_type
); i
--)
2957 vec_dest
= vect_create_destination_var (scalar_dest
,
2959 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
2965 vec_oprnds0
= VEC_alloc (tree
, heap
,
2966 (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
2967 if (op_type
== binary_op
)
2968 vec_oprnds1
= VEC_alloc (tree
, heap
, 1);
2971 /* In case the vectorization factor (VF) is bigger than the number
2972 of elements that we can fit in a vectype (nunits), we have to generate
2973 more than one vector stmt - i.e - we need to "unroll" the
2974 vector stmt by a factor VF/nunits. */
2976 prev_stmt_info
= NULL
;
2977 for (j
= 0; j
< ncopies
; j
++)
2983 vect_get_slp_defs (slp_node
, &vec_oprnds0
, &vec_oprnds1
, -1);
2986 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
, NULL
);
2987 VEC_quick_push (tree
, vec_oprnds0
, vec_oprnd0
);
2988 if (op_type
== binary_op
)
2990 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
, NULL
);
2991 VEC_quick_push (tree
, vec_oprnds1
, vec_oprnd1
);
2997 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
2998 VEC_replace (tree
, vec_oprnds0
, 0, vec_oprnd0
);
2999 if (op_type
== binary_op
)
3001 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1], vec_oprnd1
);
3002 VEC_replace (tree
, vec_oprnds1
, 0, vec_oprnd1
);
3006 /* Arguments are ready. Create the new vector stmts. */
3007 tmp_vec_dsts
= VEC_copy (tree
, heap
, vec_dsts
);
3008 vect_create_vectorized_promotion_stmts (&vec_oprnds0
, &vec_oprnds1
,
3009 multi_step_cvt
, stmt
,
3011 gsi
, slp_node
, code1
, code2
,
3012 decl1
, decl2
, op_type
,
3016 VEC_free (tree
, heap
, vec_dsts
);
3017 VEC_free (tree
, heap
, tmp_vec_dsts
);
3018 VEC_free (tree
, heap
, interm_types
);
3019 VEC_free (tree
, heap
, vec_oprnds0
);
3020 VEC_free (tree
, heap
, vec_oprnds1
);
3022 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    slp_tree slp_node)
3042 tree vec_oprnd
= NULL_TREE
;
3043 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3044 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
3045 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3046 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3047 struct loop
*loop
= NULL
;
3048 enum machine_mode vec_mode
;
3050 enum dr_alignment_support alignment_support_scheme
;
3053 enum vect_def_type dt
;
3054 stmt_vec_info prev_stmt_info
= NULL
;
3055 tree dataref_ptr
= NULL_TREE
;
3056 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
3059 gimple next_stmt
, first_stmt
= NULL
;
3060 bool strided_store
= false;
3061 unsigned int group_size
, i
;
3062 VEC(tree
,heap
) *dr_chain
= NULL
, *oprnds
= NULL
, *result_chain
= NULL
;
3064 VEC(tree
,heap
) *vec_oprnds
= NULL
;
3065 bool slp
= (slp_node
!= NULL
);
3066 unsigned int vec_num
;
3067 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3070 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
3078 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
3080 gcc_assert (ncopies
>= 1);
3082 /* FORNOW. This restriction should be relaxed. */
3083 if (loop
&& nested_in_vect_loop_p (loop
, stmt
) && ncopies
> 1)
3085 if (vect_print_dump_info (REPORT_DETAILS
))
3086 fprintf (vect_dump
, "multiple types in nested loop.");
3090 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3093 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
3096 /* Is vectorizable store? */
3098 if (!is_gimple_assign (stmt
))
3101 scalar_dest
= gimple_assign_lhs (stmt
);
3102 if (TREE_CODE (scalar_dest
) != ARRAY_REF
3103 && TREE_CODE (scalar_dest
) != INDIRECT_REF
3104 && TREE_CODE (scalar_dest
) != COMPONENT_REF
3105 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
3106 && TREE_CODE (scalar_dest
) != REALPART_EXPR
3107 && TREE_CODE (scalar_dest
) != MEM_REF
)
3110 gcc_assert (gimple_assign_single_p (stmt
));
3111 op
= gimple_assign_rhs1 (stmt
);
3112 if (!vect_is_simple_use (op
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
, &dt
))
3114 if (vect_print_dump_info (REPORT_DETAILS
))
3115 fprintf (vect_dump
, "use not simple.");
3119 /* The scalar rhs type needs to be trivially convertible to the vector
3120 component type. This should always be the case. */
3121 if (!useless_type_conversion_p (TREE_TYPE (vectype
), TREE_TYPE (op
)))
3123 if (vect_print_dump_info (REPORT_DETAILS
))
3124 fprintf (vect_dump
, "??? operands of different types");
3128 vec_mode
= TYPE_MODE (vectype
);
3129 /* FORNOW. In some cases can vectorize even if data-type not supported
3130 (e.g. - array initialization with 0). */
3131 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
3134 if (!STMT_VINFO_DATA_REF (stmt_info
))
3137 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
))
3139 strided_store
= true;
3140 first_stmt
= DR_GROUP_FIRST_DR (stmt_info
);
3141 if (!vect_strided_store_supported (vectype
)
3142 && !PURE_SLP_STMT (stmt_info
) && !slp
)
3145 if (first_stmt
== stmt
)
3147 /* STMT is the leader of the group. Check the operands of all the
3148 stmts of the group. */
3149 next_stmt
= DR_GROUP_NEXT_DR (stmt_info
);
3152 gcc_assert (gimple_assign_single_p (next_stmt
));
3153 op
= gimple_assign_rhs1 (next_stmt
);
3154 if (!vect_is_simple_use (op
, loop_vinfo
, bb_vinfo
, &def_stmt
,
3157 if (vect_print_dump_info (REPORT_DETAILS
))
3158 fprintf (vect_dump
, "use not simple.");
3161 next_stmt
= DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt
));
3166 if (!vec_stmt
) /* transformation not required. */
3168 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
3169 vect_model_store_cost (stmt_info
, ncopies
, dt
, NULL
);
3177 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
3178 group_size
= DR_GROUP_SIZE (vinfo_for_stmt (first_stmt
));
3180 DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))++;
3183 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
3185 /* We vectorize all the stmts of the interleaving group when we
3186 reach the last stmt in the group. */
3187 if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
3188 < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt
))
3197 strided_store
= false;
3198 /* VEC_NUM is the number of vect stmts to be created for this
3200 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
3201 first_stmt
= VEC_index (gimple
, SLP_TREE_SCALAR_STMTS (slp_node
), 0);
3202 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
3205 /* VEC_NUM is the number of vect stmts to be created for this
3207 vec_num
= group_size
;
3213 group_size
= vec_num
= 1;
3216 if (vect_print_dump_info (REPORT_DETAILS
))
3217 fprintf (vect_dump
, "transform store. ncopies = %d",ncopies
);
3219 dr_chain
= VEC_alloc (tree
, heap
, group_size
);
3220 oprnds
= VEC_alloc (tree
, heap
, group_size
);
3222 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
3223 gcc_assert (alignment_support_scheme
);
3225 /* In case the vectorization factor (VF) is bigger than the number
3226 of elements that we can fit in a vectype (nunits), we have to generate
3227 more than one vector stmt - i.e - we need to "unroll" the
3228 vector stmt by a factor VF/nunits. For more details see documentation in
3229 vect_get_vec_def_for_copy_stmt. */
3231 /* In case of interleaving (non-unit strided access):
3238 We create vectorized stores starting from base address (the access of the
3239 first stmt in the chain (S2 in the above example), when the last store stmt
3240 of the chain (S4) is reached:
3243 VS2: &base + vec_size*1 = vx0
3244 VS3: &base + vec_size*2 = vx1
3245 VS4: &base + vec_size*3 = vx3
3247 Then permutation statements are generated:
3249 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3250 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3253 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3254 (the order of the data-refs in the output of vect_permute_store_chain
3255 corresponds to the order of scalar stmts in the interleaving chain - see
3256 the documentation of vect_permute_store_chain()).
3258 In case of both multiple types and interleaving, above vector stores and
3259 permutation stmts are created for every copy. The result vector stmts are
3260 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
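/* Illustrative sketch (not part of the vectorizer, kept under #if 0): a
   made-up scalar loop with an interleaved (strided) store of the kind
   described above.  The stores to s[2i] and s[2i+1] form one interleaving
   chain; the vectorizer emits wide stores from the base address and
   interleave-high/low permutations of the vectorized rhs values.  */
#if 0
void
store_interleaved (const int *a, const int *b, int *s, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      s[2 * i]     = a[i];   /* First stmt of the interleaving chain.  */
      s[2 * i + 1] = b[i];   /* Last stmt of the chain.  */
    }
}
#endif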
3264 prev_stmt_info
= NULL
;
3265 for (j
= 0; j
< ncopies
; j
++)
3274 /* Get vectorized arguments for SLP_NODE. */
3275 vect_get_slp_defs (slp_node
, &vec_oprnds
, NULL
, -1);
3277 vec_oprnd
= VEC_index (tree
, vec_oprnds
, 0);
3281 /* For interleaved stores we collect vectorized defs for all the
3282 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3283 used as an input to vect_permute_store_chain(), and OPRNDS as
3284 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3286 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3287 OPRNDS are of size 1. */
3288 next_stmt
= first_stmt
;
3289 for (i
= 0; i
< group_size
; i
++)
3291 /* Since gaps are not supported for interleaved stores,
3292 GROUP_SIZE is the exact number of stmts in the chain.
3293 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3294 there is no interleaving, GROUP_SIZE is 1, and only one
3295 iteration of the loop will be executed. */
3296 gcc_assert (next_stmt
3297 && gimple_assign_single_p (next_stmt
));
3298 op
= gimple_assign_rhs1 (next_stmt
);
3300 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
,
3302 VEC_quick_push(tree
, dr_chain
, vec_oprnd
);
3303 VEC_quick_push(tree
, oprnds
, vec_oprnd
);
3304 next_stmt
= DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt
));
          /* We should have caught mismatched types earlier.  */
3309 gcc_assert (useless_type_conversion_p (vectype
,
3310 TREE_TYPE (vec_oprnd
)));
3311 dataref_ptr
= vect_create_data_ref_ptr (first_stmt
, NULL
, NULL_TREE
,
3312 &dummy
, &ptr_incr
, false,
3314 gcc_assert (bb_vinfo
|| !inv_p
);
3318 /* For interleaved stores we created vectorized defs for all the
3319 defs stored in OPRNDS in the previous iteration (previous copy).
3320 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3321 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3323 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3324 OPRNDS are of size 1. */
3325 for (i
= 0; i
< group_size
; i
++)
3327 op
= VEC_index (tree
, oprnds
, i
);
3328 vect_is_simple_use (op
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
,
3330 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, op
);
3331 VEC_replace(tree
, dr_chain
, i
, vec_oprnd
);
3332 VEC_replace(tree
, oprnds
, i
, vec_oprnd
);
3335 bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
, NULL_TREE
);
3340 result_chain
= VEC_alloc (tree
, heap
, group_size
);
3342 if (!vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
3347 next_stmt
= first_stmt
;
3348 for (i
= 0; i
< vec_num
; i
++)
3350 struct ptr_info_def
*pi
;
3353 /* Bump the vector pointer. */
3354 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
3358 vec_oprnd
= VEC_index (tree
, vec_oprnds
, i
);
3359 else if (strided_store
)
3360 /* For strided stores vectorized defs are interleaved in
3361 vect_permute_store_chain(). */
3362 vec_oprnd
= VEC_index (tree
, result_chain
, i
);
3364 data_ref
= build2 (MEM_REF
, TREE_TYPE (vec_oprnd
), dataref_ptr
,
3365 build_int_cst (reference_alias_ptr_type
3366 (DR_REF (first_dr
)), 0));
3367 pi
= get_ptr_info (dataref_ptr
);
3368 pi
->align
= TYPE_ALIGN_UNIT (vectype
);
3369 if (aligned_access_p (first_dr
))
3371 else if (DR_MISALIGNMENT (first_dr
) == -1)
3373 TREE_TYPE (data_ref
)
3374 = build_aligned_type (TREE_TYPE (data_ref
),
3375 TYPE_ALIGN (TREE_TYPE (vectype
)));
3376 pi
->align
= TYPE_ALIGN_UNIT (TREE_TYPE (vectype
));
3381 TREE_TYPE (data_ref
)
3382 = build_aligned_type (TREE_TYPE (data_ref
),
3383 TYPE_ALIGN (TREE_TYPE (vectype
)));
3384 pi
->misalign
= DR_MISALIGNMENT (first_dr
);
3387 /* Arguments are ready. Create the new vector stmt. */
3388 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
3389 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3390 mark_symbols_for_renaming (new_stmt
);
3396 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3398 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3400 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3401 next_stmt
= DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt
));
3407 VEC_free (tree
, heap
, dr_chain
);
3408 VEC_free (tree
, heap
, oprnds
);
3410 VEC_free (tree
, heap
, result_chain
);
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                   slp_tree slp_node, slp_instance slp_node_instance)
3428 tree vec_dest
= NULL
;
3429 tree data_ref
= NULL
;
3430 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3431 stmt_vec_info prev_stmt_info
;
3432 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3433 struct loop
*loop
= NULL
;
3434 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
3435 bool nested_in_vect_loop
= false;
3436 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
;
3437 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3439 enum machine_mode mode
;
3440 gimple new_stmt
= NULL
;
3442 enum dr_alignment_support alignment_support_scheme
;
3443 tree dataref_ptr
= NULL_TREE
;
3445 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
3447 int i
, j
, group_size
;
3448 tree msq
= NULL_TREE
, lsq
;
3449 tree offset
= NULL_TREE
;
3450 tree realignment_token
= NULL_TREE
;
3452 VEC(tree
,heap
) *dr_chain
= NULL
;
3453 bool strided_load
= false;
3457 bool compute_in_loop
= false;
3458 struct loop
*at_loop
;
3460 bool slp
= (slp_node
!= NULL
);
3461 bool slp_perm
= false;
3462 enum tree_code code
;
3463 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3468 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3469 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
3470 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
3481 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
3483 gcc_assert (ncopies
>= 1);
3485 /* FORNOW. This restriction should be relaxed. */
3486 if (nested_in_vect_loop
&& ncopies
> 1)
3488 if (vect_print_dump_info (REPORT_DETAILS
))
3489 fprintf (vect_dump
, "multiple types in nested loop.");
3493 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3496 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
3499 /* Is vectorizable load? */
3500 if (!is_gimple_assign (stmt
))
3503 scalar_dest
= gimple_assign_lhs (stmt
);
3504 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
3507 code
= gimple_assign_rhs_code (stmt
);
3508 if (code
!= ARRAY_REF
3509 && code
!= INDIRECT_REF
3510 && code
!= COMPONENT_REF
3511 && code
!= IMAGPART_EXPR
3512 && code
!= REALPART_EXPR
3516 if (!STMT_VINFO_DATA_REF (stmt_info
))
3519 scalar_type
= TREE_TYPE (DR_REF (dr
));
3520 mode
= TYPE_MODE (vectype
);
3522 /* FORNOW. In some cases can vectorize even if data-type not supported
3523 (e.g. - data copies). */
3524 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
3526 if (vect_print_dump_info (REPORT_DETAILS
))
3527 fprintf (vect_dump
, "Aligned load, but unsupported type.");
3531 /* The vector component type needs to be trivially convertible to the
3532 scalar lhs. This should always be the case. */
3533 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest
), TREE_TYPE (vectype
)))
3535 if (vect_print_dump_info (REPORT_DETAILS
))
3536 fprintf (vect_dump
, "??? operands of different types");
3540 /* Check if the load is a part of an interleaving chain. */
3541 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
))
3543 strided_load
= true;
3545 gcc_assert (! nested_in_vect_loop
);
3547 /* Check if interleaving is supported. */
3548 if (!vect_strided_load_supported (vectype
)
3549 && !PURE_SLP_STMT (stmt_info
) && !slp
)
3553 if (!vec_stmt
) /* transformation not required. */
3555 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
3556 vect_model_load_cost (stmt_info
, ncopies
, NULL
);
3560 if (vect_print_dump_info (REPORT_DETAILS
))
3561 fprintf (vect_dump
, "transform load.");
3567 first_stmt
= DR_GROUP_FIRST_DR (stmt_info
);
3568 /* Check if the chain of loads is already vectorized. */
3569 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt
)))
3571 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3574 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
3575 group_size
= DR_GROUP_SIZE (vinfo_for_stmt (first_stmt
));
3577 /* VEC_NUM is the number of vect stmts to be created for this group. */
3580 strided_load
= false;
3581 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
3582 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance
))
3586 vec_num
= group_size
;
3588 dr_chain
= VEC_alloc (tree
, heap
, vec_num
);
3594 group_size
= vec_num
= 1;
3597 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
3598 gcc_assert (alignment_support_scheme
);
3600 /* In case the vectorization factor (VF) is bigger than the number
3601 of elements that we can fit in a vectype (nunits), we have to generate
3602 more than one vector stmt - i.e - we need to "unroll" the
3603 vector stmt by a factor VF/nunits. In doing so, we record a pointer
3604 from one copy of the vector stmt to the next, in the field
3605 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
3606 stages to find the correct vector defs to be used when vectorizing
3607 stmts that use the defs of the current stmt. The example below illustrates
3608 the vectorization process when VF=16 and nunits=4 (i.e - we need to create
3609 4 vectorized stmts):
3611 before vectorization:
3612 RELATED_STMT VEC_STMT
3616 step 1: vectorize stmt S1:
3617 We first create the vector stmt VS1_0, and, as usual, record a
3618 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
3619 Next, we create the vector stmt VS1_1, and record a pointer to
3620 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
3621 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
3623 RELATED_STMT VEC_STMT
3624 VS1_0: vx0 = memref0 VS1_1 -
3625 VS1_1: vx1 = memref1 VS1_2 -
3626 VS1_2: vx2 = memref2 VS1_3 -
3627 VS1_3: vx3 = memref3 - -
3628 S1: x = load - VS1_0
3631 See in documentation in vect_get_vec_def_for_stmt_copy for how the
3632 information we recorded in RELATED_STMT field is used to vectorize
3635 /* In case of interleaving (non-unit strided access):
3642 Vectorized loads are created in the order of memory accesses
3643 starting from the access of the first stmt of the chain:
3646 VS2: vx1 = &base + vec_size*1
3647 VS3: vx3 = &base + vec_size*2
3648 VS4: vx4 = &base + vec_size*3
3650 Then permutation statements are generated:
3652 VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
3653 VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
3656 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3657 (the order of the data-refs in the output of vect_permute_load_chain
3658 corresponds to the order of scalar stmts in the interleaving chain - see
3659 the documentation of vect_permute_load_chain()).
3660 The generation of permutation stmts and recording them in
3661 STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().
3663 In case of both multiple types and interleaving, the vector loads and
3664 permutation stmts above are created for every copy. The result vector stmts
3665 are put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3666 STMT_VINFO_RELATED_STMT for the next copies. */
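/* Illustrative sketch (not part of the vectorizer, kept under #if 0): a
   made-up scalar loop with interleaved (strided) loads of the kind described
   above.  The loads of a[2i] and a[2i+1] form one interleaving chain; wide
   vector loads are emitted in memory order and then split into even and odd
   lanes by extract-even/extract-odd permutations.  */
#if 0
void
load_interleaved (const int *a, int *x, int *y, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      x[i] = a[2 * i];       /* Even lanes.  */
      y[i] = a[2 * i + 1];   /* Odd lanes.  */
    }
}
#endif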
3668 /* If the data reference is aligned (dr_aligned) or potentially unaligned
3669 on a target that supports unaligned accesses (dr_unaligned_supported)
3670 we generate the following code:
3674 p = p + indx * vectype_size;
3679 Otherwise, the data reference is potentially unaligned on a target that
3680 does not support unaligned accesses (dr_explicit_realign_optimized) -
3681 then generate the following code, in which the data in each iteration is
3682 obtained by two vector loads, one from the previous iteration, and one
3683 from the current iteration:
3685 msq_init = *(floor(p1))
3686 p2 = initial_addr + VS - 1;
3687 realignment_token = call target_builtin;
3690 p2 = p2 + indx * vectype_size
3692 vec_dest = realign_load (msq, lsq, realignment_token)
3697 /* If the misalignment remains the same throughout the execution of the
3698 loop, we can create the init_addr and permutation mask at the loop
3699 preheader. Otherwise, it needs to be created inside the loop.
3700 This can only occur when vectorizing memory accesses in the inner-loop
3701 nested within an outer-loop that is being vectorized. */
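/* Illustrative sketch (not part of the vectorizer, kept under #if 0): a
   byte-wise emulation of the explicit-realignment scheme described above.
   Two aligned loads that straddle the unaligned address are combined, which
   is what REALIGN_LOAD_EXPR does with msq, lsq and the realignment token.
   The 16-byte vector size and the helper name are made up for the example;
   the sketch assumes the 32 bytes starting at the aligned base are readable.  */
#if 0
#include <stdint.h>
#include <string.h>

void
realign_load_emulated (const unsigned char *p, unsigned char out[16])
{
  const unsigned char *aligned
    = (const unsigned char *) ((uintptr_t) p & ~(uintptr_t) 15);
  size_t shift = (uintptr_t) p & 15;
  unsigned char msq[16], lsq[16];
  int i;

  memcpy (msq, aligned, 16);        /* Aligned load at or below P.  */
  memcpy (lsq, aligned + 16, 16);   /* Next aligned load.  */
  for (i = 0; i < 16; i++)          /* realign_load (msq, lsq, shift).  */
    out[i] = (i + shift < 16) ? msq[i + shift] : lsq[i + shift - 16];
}
#endif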
3703 if (loop
&& nested_in_vect_loop_p (loop
, stmt
)
3704 && (TREE_INT_CST_LOW (DR_STEP (dr
))
3705 % GET_MODE_SIZE (TYPE_MODE (vectype
)) != 0))
3707 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
3708 compute_in_loop
= true;
3711 if ((alignment_support_scheme
== dr_explicit_realign_optimized
3712 || alignment_support_scheme
== dr_explicit_realign
)
3713 && !compute_in_loop
)
3715 msq
= vect_setup_realignment (first_stmt
, gsi
, &realignment_token
,
3716 alignment_support_scheme
, NULL_TREE
,
3718 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
3720 phi
= SSA_NAME_DEF_STMT (msq
);
3721 offset
= size_int (TYPE_VECTOR_SUBPARTS (vectype
) - 1);
3727 prev_stmt_info
= NULL
;
3728 for (j
= 0; j
< ncopies
; j
++)
3730 /* 1. Create the vector pointer update chain. */
3732 dataref_ptr
= vect_create_data_ref_ptr (first_stmt
,
3734 &dummy
, &ptr_incr
, false,
3738 bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
, NULL_TREE
);
3740 for (i
= 0; i
< vec_num
; i
++)
3743 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
3746 /* 2. Create the vector-load in the loop. */
3747 switch (alignment_support_scheme
)
3750 case dr_unaligned_supported
:
3752 struct ptr_info_def
*pi
;
3754 = build2 (MEM_REF
, vectype
, dataref_ptr
,
3755 build_int_cst (reference_alias_ptr_type
3756 (DR_REF (first_dr
)), 0));
3757 pi
= get_ptr_info (dataref_ptr
);
3758 pi
->align
= TYPE_ALIGN_UNIT (vectype
);
3759 if (alignment_support_scheme
== dr_aligned
)
3761 gcc_assert (aligned_access_p (first_dr
));
3764 else if (DR_MISALIGNMENT (first_dr
) == -1)
3766 TREE_TYPE (data_ref
)
3767 = build_aligned_type (TREE_TYPE (data_ref
),
3768 TYPE_ALIGN (TREE_TYPE (vectype
)));
3769 pi
->align
= TYPE_ALIGN_UNIT (TREE_TYPE (vectype
));
3774 TREE_TYPE (data_ref
)
3775 = build_aligned_type (TREE_TYPE (data_ref
),
3776 TYPE_ALIGN (TREE_TYPE (vectype
)));
3777 pi
->misalign
= DR_MISALIGNMENT (first_dr
);
3781 case dr_explicit_realign
:
3784 tree vs_minus_1
= size_int (TYPE_VECTOR_SUBPARTS (vectype
) - 1);
3786 if (compute_in_loop
)
3787 msq
= vect_setup_realignment (first_stmt
, gsi
,
3789 dr_explicit_realign
,
3792 new_stmt
= gimple_build_assign_with_ops
3793 (BIT_AND_EXPR
, NULL_TREE
, dataref_ptr
,
3795 (TREE_TYPE (dataref_ptr
),
3796 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
3797 ptr
= make_ssa_name (SSA_NAME_VAR (dataref_ptr
), new_stmt
);
3798 gimple_assign_set_lhs (new_stmt
, ptr
);
3799 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3801 = build2 (MEM_REF
, vectype
, ptr
,
3802 build_int_cst (reference_alias_ptr_type
3803 (DR_REF (first_dr
)), 0));
3804 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
3805 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
3806 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3807 gimple_assign_set_lhs (new_stmt
, new_temp
);
3808 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
3809 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
3810 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3813 bump
= size_binop (MULT_EXPR
, vs_minus_1
,
3814 TYPE_SIZE_UNIT (scalar_type
));
3815 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
, stmt
, bump
);
3816 new_stmt
= gimple_build_assign_with_ops
3817 (BIT_AND_EXPR
, NULL_TREE
, ptr
,
3820 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
3821 ptr
= make_ssa_name (SSA_NAME_VAR (dataref_ptr
), new_stmt
);
3822 gimple_assign_set_lhs (new_stmt
, ptr
);
3823 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3825 = build2 (MEM_REF
, vectype
, ptr
,
3826 build_int_cst (reference_alias_ptr_type
3827 (DR_REF (first_dr
)), 0));
3830 case dr_explicit_realign_optimized
:
3831 new_stmt
= gimple_build_assign_with_ops
3832 (BIT_AND_EXPR
, NULL_TREE
, dataref_ptr
,
3834 (TREE_TYPE (dataref_ptr
),
3835 -(HOST_WIDE_INT
)TYPE_ALIGN_UNIT (vectype
)));
3836 new_temp
= make_ssa_name (SSA_NAME_VAR (dataref_ptr
), new_stmt
);
3837 gimple_assign_set_lhs (new_stmt
, new_temp
);
3838 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3840 = build2 (MEM_REF
, vectype
, new_temp
,
3841 build_int_cst (reference_alias_ptr_type
3842 (DR_REF (first_dr
)), 0));
3847 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
3848 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
3849 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3850 gimple_assign_set_lhs (new_stmt
, new_temp
);
3851 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3852 mark_symbols_for_renaming (new_stmt
);
3854 /* 3. Handle explicit realignment if necessary/supported. Create in
3855 loop: vec_dest = realign_load (msq, lsq, realignment_token) */
3856 if (alignment_support_scheme
== dr_explicit_realign_optimized
3857 || alignment_support_scheme
== dr_explicit_realign
)
3861 lsq
= gimple_assign_lhs (new_stmt
);
3862 if (!realignment_token
)
3863 realignment_token
= dataref_ptr
;
3864 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
3865 tmp
= build3 (REALIGN_LOAD_EXPR
, vectype
, msq
, lsq
,
3867 new_stmt
= gimple_build_assign (vec_dest
, tmp
);
3868 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3869 gimple_assign_set_lhs (new_stmt
, new_temp
);
3870 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3872 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
3875 if (i
== vec_num
- 1 && j
== ncopies
- 1)
3876 add_phi_arg (phi
, lsq
, loop_latch_edge (containing_loop
),
3882 /* 4. Handle invariant-load. */
3883 if (inv_p
&& !bb_vinfo
)
3885 gcc_assert (!strided_load
);
3886 gcc_assert (nested_in_vect_loop_p (loop
, stmt
));
3891 tree vec_inv
, bitpos
, bitsize
= TYPE_SIZE (scalar_type
);
          /* CHECKME: bitpos depends on endianness?  */
3894 bitpos
= bitsize_zero_node
;
3895 vec_inv
= build3 (BIT_FIELD_REF
, scalar_type
, new_temp
,
3898 vect_create_destination_var (scalar_dest
, NULL_TREE
);
3899 new_stmt
= gimple_build_assign (vec_dest
, vec_inv
);
3900 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3901 gimple_assign_set_lhs (new_stmt
, new_temp
);
3902 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3904 for (k
= nunits
- 1; k
>= 0; --k
)
3905 t
= tree_cons (NULL_TREE
, new_temp
, t
);
3906 /* FIXME: use build_constructor directly. */
3907 vec_inv
= build_constructor_from_list (vectype
, t
);
3908 new_temp
= vect_init_vector (stmt
, vec_inv
, vectype
, gsi
);
3909 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
3912 gcc_unreachable (); /* FORNOW. */
3915 /* Collect vector loads and later create their permutation in
3916 vect_transform_strided_load (). */
3917 if (strided_load
|| slp_perm
)
3918 VEC_quick_push (tree
, dr_chain
, new_temp
);
3920 /* Store vector loads in the corresponding SLP_NODE. */
3921 if (slp
&& !slp_perm
)
3922 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt
);
3925 if (slp
&& !slp_perm
)
3930 if (!vect_transform_slp_perm_load (stmt
, dr_chain
, gsi
, vf
,
3931 slp_node_instance
, false))
3933 VEC_free (tree
, heap
, dr_chain
);
3941 if (!vect_transform_strided_load (stmt
, dr_chain
, group_size
, gsi
))
3944 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3945 VEC_free (tree
, heap
, dr_chain
);
3946 dr_chain
= VEC_alloc (tree
, heap
, group_size
);
3951 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3953 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3954 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3960 VEC_free (tree
, heap
, dr_chain
);
/* Function vect_is_simple_cond.

   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */

vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
3979 enum vect_def_type dt
;
3981 if (!COMPARISON_CLASS_P (cond
))
3984 lhs
= TREE_OPERAND (cond
, 0);
3985 rhs
= TREE_OPERAND (cond
, 1);
3987 if (TREE_CODE (lhs
) == SSA_NAME
)
3989 gimple lhs_def_stmt
= SSA_NAME_DEF_STMT (lhs
);
3990 if (!vect_is_simple_use (lhs
, loop_vinfo
, NULL
, &lhs_def_stmt
, &def
,
3994 else if (TREE_CODE (lhs
) != INTEGER_CST
&& TREE_CODE (lhs
) != REAL_CST
3995 && TREE_CODE (lhs
) != FIXED_CST
)
3998 if (TREE_CODE (rhs
) == SSA_NAME
)
4000 gimple rhs_def_stmt
= SSA_NAME_DEF_STMT (rhs
);
4001 if (!vect_is_simple_use (rhs
, loop_vinfo
, NULL
, &rhs_def_stmt
, &def
,
4005 else if (TREE_CODE (rhs
) != INTEGER_CST
&& TREE_CODE (rhs
) != REAL_CST
4006 && TREE_CODE (rhs
) != FIXED_CST
)
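/* Illustrative sketch (not part of the vectorizer, kept under #if 0): a
   made-up loop with the kind of conditional assignment that
   vectorizable_condition below handles.  The ternary becomes a COND_EXPR in
   gimple and is vectorized with a VEC_COND_EXPR.  */
#if 0
void
cond_select (const int *b, const int *c, int *a, int x, int y, int n)
{
  int i;
  for (i = 0; i < n; i++)
    a[i] = (b[i] < c[i]) ? x : y;
}
#endif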
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
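/* Illustrative example (hypothetical names, not from the original sources):
   a scalar loop body such as

     a[i] = (b[i] < c[i]) ? x : y;

   is handled here by building, per copy,

     vec_compare  = vb < vc;
     vec_cond_expr = VEC_COND_EXPR <vec_compare, vx, vy>;

   where vb, vc, vx and vy are the vector defs of the scalar operands.  */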
bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
			gimple *vec_stmt, tree reduc_def, int reduc_index)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree op = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_cond_lhs, vec_cond_rhs, vec_then_clause, vec_else_clause;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum machine_mode vec_mode;
  tree def;
  enum vect_def_type dt, dts[4];
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int j = 0;
  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "value used after loop.");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  cond_expr = TREE_OPERAND (op, 0);
  then_clause = TREE_OPERAND (op, 1);
  else_clause = TREE_OPERAND (op, 2);

  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
    return false; /* FORNOW */
  /* We do not handle two different vector types for the condition
     and the values.  */
  if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
			   TREE_TYPE (vectype)))
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
			       &then_def_stmt, &def, &dt))
	return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
	   && TREE_CODE (then_clause) != REAL_CST
	   && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
			       &else_def_stmt, &def, &dt))
	return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
	   && TREE_CODE (else_clause) != REAL_CST
	   && TREE_CODE (else_clause) != FIXED_CST)
    return false;
  vec_mode = TYPE_MODE (vectype);

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
    }

  /* Transform.  */

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt;
      if (j == 0)
	{
	  gimple gtemp;
	  vec_cond_lhs =
	    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
					  stmt, NULL);
	  vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
			      NULL, &gtemp, &def, &dts[0]);
	  vec_cond_rhs =
	    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
					  stmt, NULL);
	  vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
			      NULL, &gtemp, &def, &dts[1]);
	  if (reduc_index == 1)
	    vec_then_clause = reduc_def;
	  else
	    {
	      vec_then_clause = vect_get_vec_def_for_operand (then_clause,
							      stmt, NULL);
	      vect_is_simple_use (then_clause, loop_vinfo,
				  NULL, &gtemp, &def, &dts[2]);
	    }
	  if (reduc_index == 2)
	    vec_else_clause = reduc_def;
	  else
	    {
	      vec_else_clause = vect_get_vec_def_for_operand (else_clause,
							      stmt, NULL);
	      vect_is_simple_use (else_clause, loop_vinfo,
				  NULL, &gtemp, &def, &dts[3]);
	    }
	}
      else
	{
	  vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
	  vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_then_clause);
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_else_clause);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      vec_compare = build2 (TREE_CODE (cond_expr), vectype,
			    vec_cond_lhs, vec_cond_rhs);
      vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
			      vec_compare, vec_then_clause, vec_else_clause);

      new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);
      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "==> examining statement: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
	fprintf (vect_dump, "not vectorized: stmt has volatile operands");

      return false;
    }
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.  */
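  /* Illustrative example (hypothetical): in a loop such as

       for (i = 0; i < n; i++)
	 a[i] = b[i] + 1;

     the increment "i = i + 1" and the exit test on "i < n" are used only for
     loop control, so they are neither relevant nor live and are skipped here;
     only the load, the addition and the store are analyzed further.  */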
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "irrelevant.");

      return true;
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
      case vect_internal_def:
	break;

      case vect_reduction_def:
      case vect_nested_cycle:
	gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
		    || relevance == vect_used_in_outer_by_reduction
		    || relevance == vect_unused_in_scope));
	break;

      case vect_induction_def:
      case vect_constant_def:
      case vect_external_def:
      case vect_unknown_def_type:
      default:
	gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));
      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (vect_print_dump_info (REPORT_DETAILS))
	{
	  fprintf (vect_dump, "get vectype for scalar type: ");
	  print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
	}

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    {
	      fprintf (vect_dump, "not SLPed: unsupported data-type ");
	      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
	    }
	  return false;
	}

      if (vect_print_dump_info (REPORT_DETAILS))
	{
	  fprintf (vect_dump, "vectype: ");
	  print_generic_expr (vect_dump, vectype, TDF_SLIM);
	}

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }
  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
      *need_to_vectorize = true;
    }
  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
	  || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
	  || vectorizable_conversion (stmt, NULL, NULL, NULL)
	  || vectorizable_operation (stmt, NULL, NULL, NULL)
	  || vectorizable_assignment (stmt, NULL, NULL, NULL)
	  || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
	  || vectorizable_call (stmt, NULL, NULL)
	  || vectorizable_store (stmt, NULL, NULL, NULL)
	  || vectorizable_reduction (stmt, NULL, NULL, NULL)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_store (stmt, NULL, NULL, node));
    }

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
	{
	  fprintf (vect_dump, "not vectorized: relevant stmt not ");
	  fprintf (vect_dump, "supported: ");
	  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	}

      return false;
    }
  /* Stmts that are (also) "live" (i.e., used outside the loop) need extra
     handling, except for vectorizable reductions.  */
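  /* Illustrative example (hypothetical): in

       for (i = 0; i < n; i++)
	 last = a[i];
       use (last);

     the statement "last = a[i]" is live: its value is used after the loop,
     so besides the regular vector statement the correct scalar element must
     also be extracted (see vectorizable_live_operation).  */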
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      ok = vectorizable_live_operation (stmt, NULL, NULL);

      if (!ok)
	{
	  if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
	    {
	      fprintf (vect_dump, "not vectorized: live stmt not ");
	      fprintf (vect_dump, "supported: ");
	      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	    }

	  return false;
	}
    }
  if (!PURE_SLP_STMT (stmt_info))
    {
      /* Groups of strided accesses whose size is not a power of 2 are not
	 vectorizable yet using loop-vectorization.  Therefore, if this stmt
	 feeds non-SLP-able stmts (i.e., this stmt has to be both SLPed and
	 loop-based vectorized), the loop cannot be vectorized.  */
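      /* Illustrative example (hypothetical): interleaved accesses such as

	   ... = a[3*i] + a[3*i+1] + a[3*i+2];

	 form a group of size 3, which is not a power of 2, so the loop
	 vectorizer cannot handle the group on its own.  */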
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info)
	  && exact_log2 (DR_GROUP_SIZE (vinfo_for_stmt (
					  DR_GROUP_FIRST_DR (stmt_info)))) == -1)
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    {
	      fprintf (vect_dump, "not vectorized: the size of group "
		       "of strided accesses is not a power of 2");
	      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	    }

	  return false;
	}
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at GSI.  */
bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
		     bool *strided_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple orig_stmt_in_pattern;
  bool done;

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
      done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case type_promotion_vec_info_type:
      done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *strided_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_call (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;
    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    fprintf (vect_dump, "stmt not supported.");
	  gcc_unreachable ();
	}
    }
  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
	     vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	    {
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	    }
	}
    }
  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    {
      STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
      orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
      if (orig_stmt_in_pattern)
	{
	  stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
	  /* STMT was inserted by the vectorizer to replace a computation
	     idiom.  ORIG_STMT_IN_PATTERN is a stmt in the original sequence
	     that computed this idiom.  We need to record a pointer to
	     VEC_STMT in the stmt_info of ORIG_STMT_IN_PATTERN.  See more
	     details in the documentation of vect_pattern_recog.  */
	  if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
	    {
	      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
	      STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
	    }
	}
    }

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      gsi_remove (&next_si, true);
      tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */
stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
		   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
  STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
  STMT_SLP_TYPE (res) = loop_vect;
  DR_GROUP_FIRST_DR (res) = NULL;
  DR_GROUP_NEXT_DR (res) = NULL;
  DR_GROUP_SIZE (res) = 0;
  DR_GROUP_STORE_COUNT (res) = 0;
  DR_GROUP_GAP (res) = 0;
  DR_GROUP_SAME_DR_STMT (res) = NULL;
  DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;

  return res;
}
/* Create the vector that holds the stmt_vec_info structs.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec);
  stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
}
/* Free the vector that holds the stmt_vec_info structs.  */

void
free_stmt_vec_info_vec (void)
{
  gcc_assert (stmt_vec_info_vec);
  VEC_free (vec_void_p, heap, stmt_vec_info_vec);
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */
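/* Illustrative example (target dependent, hypothetical numbers): with a
   128-bit SIMD word, a 4-byte "int" yields a vector type with 4 units
   (e.g. V4SI), while a 2-byte "short" yields one with 8 units (e.g. V8HI),
   following the nunits = UNITS_PER_SIMD_WORD / nbytes computation below.  */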
tree
get_vectype_for_scalar_type (tree scalar_type)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0 || nbytes >= UNITS_PER_SIMD_WORD (inner_mode))
    return NULL_TREE;
  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    return NULL_TREE;
  /* If we'd build a vector type of elements whose mode precision doesn't
     match their type's precision we'll get mismatched types on vector
     extracts via BIT_FIELD_REFs.  This effectively means we disable
     vectorization of bool and/or enum types in some languages.  */
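  /* Illustrative example (front-end dependent): a C++ "bool" typically has
     TYPE_PRECISION 1 but occupies a QImode byte (8 bits of mode precision),
     so the check below rejects it even though its size would fit.  */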
  if (INTEGRAL_TYPE_P (scalar_type)
      && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
    return NULL_TREE;
  /* FORNOW: Only a single vector size per mode (UNITS_PER_SIMD_WORD)
     is expected.  */
  nunits = UNITS_PER_SIMD_WORD (inner_mode) / nbytes;

  vectype = build_vector_type (scalar_type, nunits);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "get vectype with %d units of type ", nunits);
      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
    }

  if (!vectype)
    return NULL_TREE;
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vectype: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
    }

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "mode not supported by target.");
      return NULL_TREE;
    }

  return vectype;
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type ATTRIBUTE_UNUSED)
{
  return get_vectype_for_scalar_type (scalar_type);
}
/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of a stmt in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
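/* Illustrative example (hypothetical): when analyzing the loop

     for (i = 0; i < n; i++)
       a[i] = b[i] * c;

   the operand "c", defined before the loop, is classified as
   vect_external_def; the SSA name holding the load of "b[i]", defined inside
   the loop, is vect_internal_def; and a literal such as 5 would be
   vect_constant_def.  All three are "simple" uses.  */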
bool
vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
		    bb_vec_info bb_vinfo, gimple *def_stmt,
		    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  *def_stmt = NULL;
  *def = NULL_TREE;
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_is_simple_use: operand ");
      print_generic_expr (vect_dump, operand, TDF_SLIM);
    }

  if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }
  if (TREE_CODE (operand) == PAREN_EXPR)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "non-associatable copy.");
      operand = TREE_OPERAND (operand, 0);
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "not ssa-name.");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "no def_stmt.");
      return false;
    }
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "def_stmt: ");
      print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
    }

  /* Empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN).  */
  if (gimple_nop_p (*def_stmt))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }
  bb = gimple_bb (*def_stmt);

  if ((loop && !flow_bb_inside_loop_p (loop, bb))
      || (!loop && bb != BB_VINFO_BB (bb_vinfo))
      || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
    *dt = vect_external_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "Unsupported pattern.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "type of def: %d.", *dt);
  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      break;

    default:
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "unsupported defining stmt: ");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */
bool
vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
		      bb_vec_info bb_vinfo, gimple *def_stmt,
		      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
    return false;
  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
      if (STMT_VINFO_IN_PATTERN_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
     when vectorizing the operation, if available.  In this case,
     CODE1 and CODE2 are CALL_EXPR.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */
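/* Illustrative example (hypothetical target behaviour): for

     short a[N], b[N];
     int c[N];
     for (i = 0; i < N; i++)
       c[i] = (int) a[i] * (int) b[i];

   CODE is WIDEN_MULT_EXPR, VECTYPE_IN is a vector of shorts and VECTYPE_OUT
   a vector of ints.  A target implementing the vec_widen_smult_hi/lo
   patterns would get VEC_WIDEN_MULT_HI_EXPR and VEC_WIDEN_MULT_LO_EXPR back
   in CODE1/CODE2: each input vector of 8 shorts produces two output vectors
   of 4 ints.  */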
bool
supportable_widening_operation (enum tree_code code, gimple stmt,
				tree vectype_out, tree vectype_in,
				tree *decl1, tree *decl2,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				VEC (tree, heap) **interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  /* The result of a vectorized widening operation usually requires two vectors
     (because the widened results do not fit in one vector).  The generated
     vector results would normally be expected to be generated in the same
     order as in the original scalar computation, i.e. if 8 results are
     generated in each vector iteration, they are to be organized as follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening operation is
     used in a reduction computation only, the order doesn't matter (because
     when vectorizing a reduction we change the order of the computation).
     Some targets can take advantage of this and generate more efficient code.
     For example, targets like Altivec, that support widen_mult using a
     sequence of {mult_even,mult_odd} generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow changing the order
     of the computation in the inner-loop during outer-loop vectorization.  */
  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
	{
	  c1 = VEC_WIDEN_MULT_HI_EXPR;
	  c2 = VEC_WIDEN_MULT_LO_EXPR;
	}
      else
	{
	  c2 = VEC_WIDEN_MULT_HI_EXPR;
	  c1 = VEC_WIDEN_MULT_LO_EXPR;
	}
      break;

    CASE_CONVERT:
      if (BYTES_BIG_ENDIAN)
	{
	  c1 = VEC_UNPACK_HI_EXPR;
	  c2 = VEC_UNPACK_LO_EXPR;
	}
      else
	{
	  c2 = VEC_UNPACK_HI_EXPR;
	  c1 = VEC_UNPACK_LO_EXPR;
	}
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
	{
	  c1 = VEC_UNPACK_FLOAT_HI_EXPR;
	  c2 = VEC_UNPACK_FLOAT_LO_EXPR;
	}
      else
	{
	  c2 = VEC_UNPACK_FLOAT_HI_EXPR;
	  c1 = VEC_UNPACK_FLOAT_LO_EXPR;
	}
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;
  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    {
      int i;
      tree prev_type = vectype, intermediate_type;
      enum machine_mode intermediate_mode, prev_mode = vec_mode;
      optab optab3, optab4;

      if (!CONVERT_EXPR_CODE_P (code))
	return false;

      *code1 = c1;
      *code2 = c2;
      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
	 intermediate steps in the promotion sequence.  We try
	 MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
	{
	  intermediate_mode = insn_data[icode1].operand[0].mode;
	  intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
						     TYPE_UNSIGNED (prev_type));
	  optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
	  optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

	  if (!optab3 || !optab4
	      || ((icode1 = optab_handler (optab1, prev_mode))
		  == CODE_FOR_nothing)
	      || insn_data[icode1].operand[0].mode != intermediate_mode
	      || ((icode2 = optab_handler (optab2, prev_mode))
		  == CODE_FOR_nothing)
	      || insn_data[icode2].operand[0].mode != intermediate_mode
	      || ((icode1 = optab_handler (optab3, intermediate_mode))
		  == CODE_FOR_nothing)
	      || ((icode2 = optab_handler (optab4, intermediate_mode))
		  == CODE_FOR_nothing))
	    return false;
	  VEC_quick_push (tree, *interm_types, intermediate_type);
	  (*multi_step_cvt)++;

	  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	    return true;

	  prev_type = intermediate_type;
	  prev_mode = intermediate_mode;
	}

      return false;
    }

  *code1 = c1;
  *code2 = c2;
  return true;
}
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */
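/* Illustrative example (hypothetical): for

     int a[N];
     char b[N];
     for (i = 0; i < N; i++)
       b[i] = (char) a[i];

   CODE is a conversion handled via CASE_CONVERT, so CODE1 becomes
   VEC_PACK_TRUNC_EXPR.  Going from int to char requires one intermediate
   short step, so MULTI_STEP_CVT would be 1 and INTERM_TYPES would hold the
   short vector type.  */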
bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 VEC (tree, heap) **interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  int i;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);
  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    {
      enum machine_mode intermediate_mode, prev_mode = vec_mode;

      prev_type = vectype;
      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
	 intermediate steps in the narrowing sequence.  We try
	 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
	 not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
	{
	  intermediate_mode = insn_data[icode1].operand[0].mode;
	  intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
						     TYPE_UNSIGNED (prev_type));
	  interm_optab = optab_for_tree_code (c1, intermediate_type,
					      optab_default);
	  if (!interm_optab
	      || ((icode1 = optab_handler (optab1, prev_mode))
		  == CODE_FOR_nothing)
	      || insn_data[icode1].operand[0].mode != intermediate_mode
	      || ((icode1 = optab_handler (interm_optab, intermediate_mode))
		  == CODE_FOR_nothing))
	    return false;
	  VEC_quick_push (tree, *interm_types, intermediate_type);
	  (*multi_step_cvt)++;

	  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	    return true;

	  prev_type = intermediate_type;
	  prev_mode = intermediate_mode;