/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "target.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "expr.h"
#include "recog.h"
#include "optabs.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      gimple pattern_stmt;

      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)   */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: phi relevant? ");
              print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "init: stmt relevant? ");
              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "worklist: examine stmt: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the liveness/relevance as follows:
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */
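      /* For instance, in a dot-product loop computing s += a[i] * b[i], the
         multiplication feeds only the reduction variable s, so its def is
         marked vect_used_by_reduction, whereas a stmt whose result is also
         used by a regular (non-reduction) computation stays
         vect_used_in_scope.  */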
      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
            switch (tmp_relevant)
              {
                case vect_unused_in_scope:
                  relevant = vect_used_by_reduction;
                  break;

                case vect_used_by_reduction:
                  if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
                  /* fall through */

                default:
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "unsupported use of reduction.");

                  VEC_free (gimple, heap, worklist);
                  return false;
              }

            live_p = false;
            break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of nested cycle.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (vect_print_dump_info (REPORT_DETAILS))
                  fprintf (vect_dump, "unsupported use of double reduction.");

                VEC_free (gimple, heap, worklist);
                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
        {
          tree op = USE_FROM_PTR (use_p);
          if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
            {
              VEC_free (gimple, heap, worklist);
              return false;
            }
        }
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}
/* Get cost by calling cost target builtin.  */

static inline int
vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       dummy_type, dummy);
}

/* Get cost for STMT.  */

int
cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return vect_get_stmt_cost (scalar_load);
    case store_vec_info_type:
      return vect_get_stmt_cost (scalar_store);
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return vect_get_stmt_cost (scalar_stmt);
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */
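/* Illustrative example (assuming a target where every vector stmt is charged
   one unit): vectorizing c = a + b with ncopies = 2 and a loop-invariant
   operand b is accounted as 2 units inside the loop, plus 1 unit outside the
   loop for setting up the invariant vector operand.  */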
void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt, slp_tree slp_node)
{
  int i;
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */
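/* For example, in a group of four interleaved stores the first store of the
   group reports a size of 4 and the remaining three report 1, so the
   per-group overhead is charged exactly once.  */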
static int
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = DR_GROUP_FIRST_DR (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return DR_GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt, slp_tree slp_node)
{
  int group_size;
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (DR_GROUP_FIRST_DR (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          group_size = 1;
        }
      else
        {
          first_stmt = DR_GROUP_FIRST_DR (stmt_info);
          group_size = vect_cost_strided_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* Is this an access in a group of stores, which provide strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
                 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_store);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: aligned.");

        break;
      }

    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
                                                vectype, DR_MISALIGNMENT (dr));

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
                   "hardware.");

        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, slp_tree slp_node)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = DR_GROUP_FIRST_DR (stmt_info);
  if (first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* Is this an access in a group of loads providing strided access?
     If so, add in the cost of the permutes.  */
  if (group_size > 1)
    {
      /* Uses even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
                    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
        fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
                 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
         ((!DR_GROUP_FIRST_DR (stmt_info)) || group_size > 1 || slp_node),
         &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
             "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += ncopies * vect_get_stmt_cost (vector_load);

        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: aligned.");

        break;
      }
    case dr_unaligned_supported:
      {
        gimple stmt = DR_STMT (dr);
        stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
        tree vectype = STMT_VINFO_VECTYPE (stmt_info);

        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += ncopies
          * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
                                                vectype, DR_MISALIGNMENT (dr));
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
                   "hardware.");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           outside costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += vect_get_stmt_cost (vector_stmt);

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (vect_print_dump_info (REPORT_COST))
          fprintf (vect_dump, "vect_model_load_cost: unaligned software "
                   "pipelined.");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide strided
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost)
          {
            *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
            if (targetm.vectorize.builtin_mask_for_load)
              *outside_cost += vect_get_stmt_cost (vector_stmt);
          }

        *inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
                                   + vect_get_stmt_cost (vector_stmt));
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
                  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  edge pe;
  tree new_temp;
  basic_block new_bb;

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
        }
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */
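/* For example (illustrative): if OP is the constant 3 and the vector type has
   four elements, a new stmt vect_cst_ = {3,3,3,3} is created and its def is
   returned; if OP is an SSA name defined inside the loop by a stmt that has
   already been vectorized, the def of that vectorized stmt is returned
   instead.  */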
tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree vec_inv;
  tree vec_cst;
  tree t = NULL_TREE;
  tree def;
  int i;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
                                      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      if (def)
        {
          fprintf (vect_dump, "def = ");
          print_generic_expr (vect_dump, def, TDF_SLIM);
        }
      if (def_stmt)
        {
          fprintf (vect_dump, " def_stmt = ");
          print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
        }
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = op;

        /* Create 'vect_cst_ = {cst,cst,...,cst}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

        vec_cst = build_vector_from_val (vector_type, op);
        return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
        gcc_assert (vector_type);
        nunits = TYPE_VECTOR_SUBPARTS (vector_type);

        if (scalar_def)
          *scalar_def = def;

        /* Create 'vec_inv = {inv,inv,..,inv}'  */
        if (vect_print_dump_info (REPORT_DETAILS))
          fprintf (vect_dump, "Create vector_inv.");

        for (i = nunits - 1; i >= 0; --i)
          {
            t = tree_cons (NULL_TREE, def, t);
          }

        /* FIXME: use build_constructor directly.  */
        vec_inv = build_constructor_from_list (vector_type, t);
        return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
        if (scalar_def)
          *scalar_def = NULL/* FIXME tuples: def_stmt*/;

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
        struct loop *loop;

        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
        loop = (gimple_bb (def_stmt))->loop_father;

        /* Get the def before the loop  */
        op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
        return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 VEC(tree,heap) **vec_oprnds0,
                                 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

static void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
                   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
        {
          *vec_oprnds1 = VEC_alloc (tree, heap, 1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
          VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
                                                   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
                                                        vectype_in);
}
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  if (stmt_can_throw_internal (stmt))
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
          && !types_compatible_p (rhs_type, TREE_TYPE (op)))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument types differ.");
          return false;
        }
      if (!rhs_type)
        rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
                                 &def_stmt, &def, &dt[i], &opvectype))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");
          return false;
        }

      if (!vectype_in)
        vectype_in = opvectype;
      else if (opvectype
               && opvectype != vectype_in)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "argument vector types differ.");
          return false;
        }
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
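  /* Illustrative example (assuming 128-bit vectors): a call that takes float
     arguments and returns double values has nunits_in = 4 and nunits_out = 2,
     so nunits_out == nunits_in / 2 and the result must be widened; if both
     vector types have the same number of elements, no modifier is needed.  */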
  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
                  = vect_get_vec_def_for_operand (op, stmt, NULL);
              else
                {
                  vec_oprnd0 = gimple_call_arg (new_stmt, i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
        {
          /* Build argument list for the vectorized call.  */
          if (j == 0)
            vargs = VEC_alloc (tree, heap, nargs * 2);
          else
            VEC_truncate (tree, vargs, 0);

          for (i = 0; i < nargs; i++)
            {
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                {
                  vec_oprnd0
                    = vect_get_vec_def_for_operand (op, stmt, NULL);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
              else
                {
                  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
                  vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }

              VEC_quick_push (tree, vargs, vec_oprnd0);
              VEC_quick_push (tree, vargs, vec_oprnd1);
            }

          new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple
vect_gen_widened_results_half (enum tree_code code,
                               tree decl,
                               tree vec_oprnd0, tree vec_oprnd1, int op_type,
                               tree vec_dest, gimple_stmt_iterator *gsi,
                               gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
        new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
        new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
        vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
                                               vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, j;
  tree rhs_type;
  tree builtin_decl;
  enum { NARROW, NONE, WIDEN } modifier;
  int i;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  tree vop0;
  VEC(tree,heap) *dummy = NULL;
  int dummy_int;

  /* Is STMT a vectorizable conversion?   */

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }
  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "no vectype for scalar type ");
          print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
        }

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
      || (modifier == WIDEN
          && !supportable_widening_operation (code, stmt,
                                              vectype_out, vectype_in,
                                              &decl1, &decl2,
                                              &code1, &code2,
                                              &dummy_int, &dummy))
      || (modifier == NARROW
          && !supportable_narrowing_operation (code, vectype_out, vectype_in,
                                               &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }

  if (modifier != NONE)
    {
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
        return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          builtin_decl =
            targetm.vectorize.builtin_conversion (code,
                                                  vectype_out, vectype_in);
          FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
            {
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_call (builtin_decl, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
          else
            vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

          /* Generate first half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code1, decl1,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);

          /* Generate second half of the widened result:  */
          new_stmt
            = vect_gen_widened_results_half (code2, decl2,
                                             vec_oprnd0, vec_oprnd1,
                                             unary_op, vec_dest, gsi, stmt);
          STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
            }

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
                                                   vec_oprnd1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }

  if (vec_oprnds0)
    VEC_free (tree, heap, vec_oprnds0);

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
                         gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  VEC(tree,heap) *vec_oprnds = NULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (!vect_is_simple_use_1 (op, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if (CONVERT_EXPR_CODE_P (code)
      && (!vectype_in
          || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
          || (GET_MODE_SIZE (TYPE_MODE (vectype))
              != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_assignment ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }

  /** Transform.  **/
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform assignment.");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (tree, vec_oprnds, i, vop)
        {
          if (CONVERT_EXPR_CODE_P (code))
            vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  VEC_free (tree, heap, vec_oprnds);
  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
                    gimple *vec_stmt, slp_tree slp_node)
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  enum machine_mode vec_mode;
  enum machine_mode optab_op2_mode;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");

  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  gcc_assert (vectype);

  if (vect_print_dump_info (REPORT_DETAILS))
      fprintf (vect_dump, "no vectype for scalar type ");
      print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");

  vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */
  if (dt[1] == vect_internal_def && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always  */
      VEC (gimple, heap) *stmts = SLP_TREE_SCALAR_STMTS (slp_node);

      FOR_EACH_VEC_ELT (gimple, stmts, k, slpstmt)
        if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
          scalar_shift_arg = false;

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "operand mode requires invariant argument.");

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "vector/vector shift/rotate found.");

      /* See if the machine has a vector shifted by scalar insn and if not
         then see if it has a vector shifted by vector insn.  */
      optab = optab_for_tree_code (code, vectype, optab_scalar);
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "vector/scalar shift/rotate found.");

          optab = optab_for_tree_code (code, vectype, optab_vector);
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
              scalar_shift_arg = false;

              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "vector/vector shift/rotate found.");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);

  /* Supportable by target?  */
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab.");

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "op not supported by target.");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "proceeding using word mode.");

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not worthwhile without SIMD support.");

  if (!vec_stmt) /* transformation not required.  */
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_shift ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform binary/unary operation.");

  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
     created in the previous stages of the recursion, so no allocation is
     needed, except for the case of shift with scalar shift argument.  In that
     case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
     be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
     In case of loop-based vectorization we allocate VECs of size 1.  We
     allocate VEC_OPRNDS1 only in case of binary operation.  */
      vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnds1 = VEC_alloc (tree, heap, 1);
  else if (scalar_shift_arg)
    vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
      if (scalar_shift_arg)
          /* Vector shl and shr insn patterns can be defined with scalar
             operand 2 (shift operand).  In this case, use constant or loop
             invariant op1 directly, without extending it to vector mode  */
          optab_op2_mode = insn_data[icode].operand[2].mode;
          if (!VECTOR_MODE_P (optab_op2_mode))
              if (vect_print_dump_info (REPORT_DETAILS))
                fprintf (vect_dump, "operand 1 using scalar mode.");
              VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
                  /* Store vec_oprnd1 for every vector stmt to be created
                     for SLP_NODE.  We check during the analysis that all
                     the shift arguments are the same.
                     TODO: Allow different constants for different vector
                     stmts generated for an SLP instance.  */
                  for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                    VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);

      /* vec_oprnd1 is available if operand 1 should be of a scalar-type
         (a special case for certain kind of vector shifts); otherwise,
         operand 1 should be of a vector type (the usual case).  */
        vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
        vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
          vop1 = VEC_index (tree, vec_oprnds1, i);
          new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);

      STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);

  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_oprnds1);
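
/* Illustration (hypothetical example): in

       for (i = 0; i < n; i++)
         b[i] = a[i] << 3;

   the shift amount is a constant, so scalar_shift_arg stays true and the
   vector/scalar optab is tried first, whereas in

       for (i = 0; i < n; i++)
         b[i] = a[i] << c[i];

   the amount is defined inside the loop (vect_internal_def) and the
   vector/vector optab must be used.  The arrays a, b and c are made up for
   this sketch.  */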
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  enum machine_mode vec_mode;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  VEC(tree,heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL, *vec_oprnds2 = NULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "num. args = %d (not unary/binary/ternary op).",

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");

  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  gcc_assert (vectype);

  if (vect_print_dump_info (REPORT_DETAILS))
      fprintf (vect_dump, "no vectype for scalar type ");
      print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)

  if (op_type == binary_op || op_type == ternary_op)
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");

  if (op_type == ternary_op)
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, loop_vinfo, bb_vinfo, &def_stmt, &def,
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");

  vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)

  optab = optab_for_tree_code (code, vectype, optab_default);

  /* Supportable by target?  */
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no optab.");

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "op not supported by target.");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "proceeding using word mode.");

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not worthwhile without SIMD support.");

  if (!vec_stmt) /* transformation not required.  */
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_operation ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform binary/unary operation.");

  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
     created in the previous stages of the recursion, so no allocation is
     needed, except for the case of shift with scalar shift argument.  In that
     case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
     be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
     In case of loop-based vectorization we allocate VECs of size 1.  We
     allocate VEC_OPRNDS1 only in case of binary operation.  */
      vec_oprnds0 = VEC_alloc (tree, heap, 1);
      if (op_type == binary_op || op_type == ternary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);
      if (op_type == ternary_op)
        vec_oprnds2 = VEC_alloc (tree, heap, 1);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
                                RELATED_STMT    VEC_STMT
     VS1_0:  vx0 = memref0      VS1_1           -
     VS1_1:  vx1 = memref1      VS1_2           -
     VS1_2:  vx2 = memref2      VS1_3           -
     VS1_3:  vx3 = memref3      -               -
     S1:     x = load           -               VS1_0

     step2: vectorize stmt S2 (done here):
     To vectorize stmt S2 we first need to find the relevant vector
     def for the first operand 'x'.  This is, as usual, obtained from
     the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
     that defines 'x' (S1).  This way we find the stmt VS1_0, and the
     relevant vector def 'vx0'.  Having found 'vx0' we can generate
     the vector stmt VS2_0, and as usual, record it in the
     STMT_VINFO_VEC_STMT of stmt S2.
     When creating the second copy (VS2_1), we obtain the relevant vector
     def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
     stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
     vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
     pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
     Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
     chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
     VS1_0:  vx0 = memref0      VS1_1           -
     VS1_1:  vx1 = memref1      VS1_2           -
     VS1_2:  vx2 = memref2      VS1_3           -
     VS1_3:  vx3 = memref3      -               -
     S1:     x = load           -               VS1_0
     VS2_0:  vz0 = vx0 + v1     VS2_1           -
     VS2_1:  vz1 = vx1 + v1     VS2_2           -
     VS2_2:  vz2 = vx2 + v1     VS2_3           -
     VS2_3:  vz3 = vx3 + v1     -               -
     S2:     z = x + 1          -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
      if (op_type == binary_op || op_type == ternary_op)
        vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
        vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
      if (op_type == ternary_op)
          vec_oprnds2 = VEC_alloc (tree, heap, 1);
          VEC_quick_push (tree, vec_oprnds2,
                          vect_get_vec_def_for_operand (op2, stmt, NULL));

      vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
      if (op_type == ternary_op)
          tree vec_oprnd = VEC_pop (tree, vec_oprnds2);
          VEC_quick_push (tree, vec_oprnds2,
                          vect_get_vec_def_for_stmt_copy (dt[2],

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? VEC_index (tree, vec_oprnds1, i) : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? VEC_index (tree, vec_oprnds2, i) : NULL_TREE);
          new_stmt = gimple_build_assign_with_ops3 (code, vec_dest,
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);

      STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);

  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_oprnds1);
  VEC_free (tree, heap, vec_oprnds2);
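
/* Illustration (hypothetical example): with nunits = 4 a scalar statement
   z = x + y becomes one vector statement

       vz_0 = vx_0 + vy_0;

   per copy; with VF = 16 the loop above emits four such copies chained
   through STMT_VINFO_RELATED_STMT, matching the VS2_0..VS2_3 example in the
   comment before the transformation loop.  The operand names are made up for
   this sketch.  */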
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
                          VEC (tree, heap) **vec_oprnds, int multi_step_cvt)
  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  VEC_quick_push (tree, *vec_oprnds, vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds, vec_oprnd);

  /* For conversion in multiple steps, continue to get operands  */
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
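
/* Illustration (hypothetical): for a two-step narrowing conversion
   (MULTI_STEP_CVT == 1) the recursion above pushes four vector defs for the
   single scalar operand, so VEC_OPRNDS ends up holding {vx_0, vx_1, vx_2,
   vx_3}, each def obtained from the previous one by
   vect_get_vec_def_for_stmt_copy.  The names vx_* are made up.  */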
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

vect_create_vectorized_demotion_stmts (VEC (tree, heap) **vec_oprnds,
                                       int multi_step_cvt, gimple stmt,
                                       VEC (tree, heap) *vec_dsts,
                                       gimple_stmt_iterator *gsi,
                                       slp_tree slp_node, enum tree_code code,
                                       stmt_vec_info *prev_stmt_info)
  tree vop0, vop1, new_tmp, vec_dest;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = VEC_pop (tree, vec_dsts);

  for (i = 0; i < VEC_length (tree, *vec_oprnds); i += 2)
      /* Create demotion operation.  */
      vop0 = VEC_index (tree, *vec_oprnds, i);
      vop1 = VEC_index (tree, *vec_oprnds, i + 1);
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      /* Store the resulting vector for next recursive call.  */
      VEC_replace (tree, *vec_oprnds, i/2, new_tmp);

      /* This is the last step of the conversion sequence.  Store the
         vectors in SLP_NODE or in vector info of the scalar statement
         (or in STMT_VINFO_RELATED_STMT chain).  */
        VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);

      if (!*prev_stmt_info)
        STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
        STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

      *prev_stmt_info = vinfo_for_stmt (new_stmt);

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */

      /* At each level of recursion we have half of the operands we had at the
         previous level.  */
      VEC_truncate (tree, *vec_oprnds, (i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
                                             stmt, vec_dsts, gsi, slp_node,
                                             code, prev_stmt_info);
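
/* Illustration (hypothetical example): narrowing int to short with V4SI
   inputs and a V8HI destination pairs the operands two at a time, e.g.

       vshort_0 = VEC_PACK_TRUNC_EXPR <vint_0, vint_1>;

   and in a multi-step conversion the result is written back into VEC_OPRNDS
   so that the next recursion level can pack it again.  The operand names are
   made up for this sketch.  */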
/* Function vectorizable_type_demotion

   Check if STMT performs a binary or unary operation that involves
   type demotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
                            gimple *vec_stmt, slp_tree slp_node)
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info prev_stmt_info;
  int multi_step_cvt = 0;
  VEC (tree, heap) *vec_oprnds0 = NULL;
  VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
  tree last_oprnd, intermediate_type;

  /* FORNOW: not supported by basic block SLP vectorization.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)

  /* Is STMT a vectorizable type-demotion operation?  */
  if (!is_gimple_assign (stmt))

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code))

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Check the operands of the operation.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
          && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
         || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
             && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
             && CONVERT_EXPR_CODE_P (code))))

  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");

  /* If op0 is an external def use a vector type with the
     same size as the output vector type if possible.  */
  vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  gcc_assert (vectype_in);

  if (vect_print_dump_info (REPORT_DETAILS))
      fprintf (vect_dump, "no vectype for scalar type ");
      print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in >= nunits_out)

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if (!supportable_narrowing_operation (code, vectype_out, vectype_in,
                                        &code1, &multi_step_cvt, &interm_types))

  if (!vec_stmt) /* transformation not required.  */
      STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_demotion ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform type demotion operation. ncopies = %d.",

  /* In case of multi-step demotion, we first generate demotion operations to
     the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_narrowing_operation, and store them in the correct order
     for future use in vect_create_vectorized_demotion_stmts().  */
    vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
    vec_dsts = VEC_alloc (tree, heap, 1);

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  VEC_quick_push (tree, vec_dsts, vec_dest);

      for (i = VEC_length (tree, interm_types) - 1;
           VEC_iterate (tree, interm_types, i, intermediate_type); i--)
          vec_dest = vect_create_destination_var (scalar_dest,
          VEC_quick_push (tree, vec_dsts, vec_dest);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
        vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL, -1);
          VEC_free (tree, heap, vec_oprnds0);
          vec_oprnds0 = VEC_alloc (tree, heap,
                        (multi_step_cvt ? vect_pow2 (multi_step_cvt) * 2 : 2));
          vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
                                    vect_pow2 (multi_step_cvt) - 1);

      /* Arguments are ready.  Create the new vector stmts.  */
      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
      vect_create_vectorized_demotion_stmts (&vec_oprnds0,
                                             multi_step_cvt, stmt, tmp_vec_dsts,
                                             gsi, slp_node, code1,

  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_dsts);
  VEC_free (tree, heap, tmp_vec_dsts);
  VEC_free (tree, heap, interm_types);

  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
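
/* Illustration (hypothetical example): the narrowing loop

       for (i = 0; i < n; i++)
         s[i] = (short) x[i];

   (s short[], x int[]) has nunits_in = 4 and nunits_out = 8 for V4SI/V8HI,
   so the nunits_in >= nunits_out rejection above does not trigger; NCOPIES
   is derived from nunits_out and every copy consumes two input vectors per
   output vector.  The arrays s and x are made up for this sketch.  */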
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

vect_create_vectorized_promotion_stmts (VEC (tree, heap) **vec_oprnds0,
                                        VEC (tree, heap) **vec_oprnds1,
                                        int multi_step_cvt, gimple stmt,
                                        VEC (tree, heap) *vec_dsts,
                                        gimple_stmt_iterator *gsi,
                                        slp_tree slp_node, enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type,
                                        stmt_vec_info *prev_stmt_info)
  tree vop0, vop1, new_tmp1, new_tmp2, vec_dest;
  gimple new_stmt1, new_stmt2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  VEC (tree, heap) *vec_tmp;

  vec_dest = VEC_pop (tree, vec_dsts);
  vec_tmp = VEC_alloc (tree, heap, VEC_length (tree, *vec_oprnds0) * 2);

  FOR_EACH_VEC_ELT (tree, *vec_oprnds0, i, vop0)
      if (op_type == binary_op)
        vop1 = VEC_index (tree, *vec_oprnds1, i);

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);

      /* Store the results for the recursive call.  */
      VEC_quick_push (tree, vec_tmp, new_tmp1);
      VEC_quick_push (tree, vec_tmp, new_tmp2);

      /* Last step of the promotion sequence - store the results.  */
          VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt1);
          VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt2);

          if (!*prev_stmt_info)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt1;
            STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt1;

          *prev_stmt_info = vinfo_for_stmt (new_stmt1);
          STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt2;
          *prev_stmt_info = vinfo_for_stmt (new_stmt2);

      /* For a multi-step promotion operation we call the function
         recursively for every stage.  We start from the input type,
         create promotion operations to the intermediate types, and then
         create promotions to the output type.  */
      *vec_oprnds0 = VEC_copy (tree, heap, vec_tmp);
      vect_create_vectorized_promotion_stmts (vec_oprnds0, vec_oprnds1,
                                              multi_step_cvt - 1, stmt,
                                              vec_dsts, gsi, slp_node, code1,
                                              code2, decl2, decl2, op_type,

  VEC_free (tree, heap, vec_tmp);
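
/* Illustration (hypothetical example): widening short to int splits every
   input vector into two halves, e.g.

       vint_lo = VEC_UNPACK_LO_EXPR <vshort_0>;
       vint_hi = VEC_UNPACK_HI_EXPR <vshort_0>;

   which is why the function above generates two "halves" per operand and
   pushes both results for the next recursion level.  The operand names are
   made up for this sketch.  */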
/* Function vectorizable_type_promotion

   Check if STMT performs a binary or unary operation that involves
   type promotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
                             gimple *vec_stmt, slp_tree slp_node)
  tree op0, op1 = NULL;
  tree vec_oprnd0 = NULL, vec_oprnd1 = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  stmt_vec_info prev_stmt_info;
  tree intermediate_type = NULL_TREE;
  int multi_step_cvt = 0;
  VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
  VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;

  /* FORNOW: not supported by basic block SLP vectorization.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)

  /* Is STMT a vectorizable type-promotion operation?  */
  if (!is_gimple_assign (stmt))

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != WIDEN_MULT_EXPR)

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Check the operands of the operation.  */
  op0 = gimple_assign_rhs1 (stmt);
  if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
          && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
         || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
             && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
             && CONVERT_EXPR_CODE_P (code))))

  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
                             &def_stmt, &def, &dt[0], &vectype_in))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");

  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  vectype_in = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  gcc_assert (vectype_in);

  if (vect_print_dump_info (REPORT_DETAILS))
      fprintf (vect_dump, "no vectype for scalar type ");
      print_generic_expr (vect_dump, TREE_TYPE (op0), TDF_SLIM);

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in <= nunits_out)

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  op_type = TREE_CODE_LENGTH (code);
  if (op_type == binary_op)
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, loop_vinfo, NULL, &def_stmt, &def, &dt[1]))
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "use not simple.");

  /* Supportable by target?  */
  if (!supportable_widening_operation (code, stmt, vectype_out, vectype_in,
                                       &decl1, &decl2, &code1, &code2,
                                       &multi_step_cvt, &interm_types))

  /* Binary widening operation can only be supported directly by the  */
  gcc_assert (!(multi_step_cvt && op_type == binary_op));

  if (!vec_stmt) /* transformation not required.  */
      STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "=== vectorizable_promotion ===");
      vect_model_simple_cost (stmt_info, 2*ncopies, dt, NULL);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",

  /* In case of multi-step promotion, we first generate promotion operations
     to the intermediate types, and then from those types to the final one.
     We store vector destination in VEC_DSTS in the correct order for
     recursive creation of promotion operations in
     vect_create_vectorized_promotion_stmts().  Vector destinations are created
     according to TYPES received from supportable_widening_operation().  */
    vec_dsts = VEC_alloc (tree, heap, multi_step_cvt + 1);
    vec_dsts = VEC_alloc (tree, heap, 1);

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
  VEC_quick_push (tree, vec_dsts, vec_dest);

      for (i = VEC_length (tree, interm_types) - 1;
           VEC_iterate (tree, interm_types, i, intermediate_type); i--)
          vec_dest = vect_create_destination_var (scalar_dest,
          VEC_quick_push (tree, vec_dsts, vec_dest);

      vec_oprnds0 = VEC_alloc (tree, heap,
                               (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
      if (op_type == binary_op)
        vec_oprnds1 = VEC_alloc (tree, heap, 1);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
            vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
              vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
              VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
              if (op_type == binary_op)
                  vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
                  VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);

          vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
          VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
          if (op_type == binary_op)
              vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
              VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);

      /* Arguments are ready.  Create the new vector stmts.  */
      tmp_vec_dsts = VEC_copy (tree, heap, vec_dsts);
      vect_create_vectorized_promotion_stmts (&vec_oprnds0, &vec_oprnds1,
                                              multi_step_cvt, stmt,
                                              gsi, slp_node, code1, code2,
                                              decl1, decl2, op_type,

  VEC_free (tree, heap, vec_dsts);
  VEC_free (tree, heap, tmp_vec_dsts);
  VEC_free (tree, heap, interm_types);
  VEC_free (tree, heap, vec_oprnds0);
  VEC_free (tree, heap, vec_oprnds1);

  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
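
/* Illustration (hypothetical example): a loop such as

       for (i = 0; i < n; i++)
         x[i] = s[i] * t[i];

   (x int[], s and t short[]) can be recognized as a WIDEN_MULT_EXPR; each
   pair of V8HI operands then yields two V4SI results through the hi/lo
   halves created above, and the cost model charges 2*ncopies simple
   statements.  The arrays x, s and t are made up for this sketch.  */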
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  enum machine_mode vec_mode;
  enum dr_alignment_support alignment_support_scheme;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  gimple next_stmt, first_stmt = NULL;
  bool strided_store = false;
  unsigned int group_size, i;
  VEC(tree,heap) *dr_chain = NULL, *oprnds = NULL, *result_chain = NULL;
  VEC(tree,heap) *vec_oprnds = NULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW. This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "use not simple.");

  /* The scalar rhs type needs to be trivially convertible to the vector
     component type.  This should always be the case.  */
  if (!useless_type_conversion_p (TREE_TYPE (vectype), TREE_TYPE (op)))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "??? operands of different types");

  vec_mode = TYPE_MODE (vectype);
  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)

  if (!STMT_VINFO_DATA_REF (stmt_info))

  if (tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "negative step for store.");

  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
      strided_store = true;
      first_stmt = DR_GROUP_FIRST_DR (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
          group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (!vect_strided_store_supported (vectype, group_size))

      if (first_stmt == stmt)
          /* STMT is the leader of the group.  Check the operands of all the
             stmts of the group.  */
          next_stmt = DR_GROUP_NEXT_DR (stmt_info);
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt,
                  if (vect_print_dump_info (REPORT_DETAILS))
                    fprintf (vect_dump, "use not simple.");
              next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));

  if (!vec_stmt) /* transformation not required.  */
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      vect_model_store_cost (stmt_info, ncopies, dt, NULL);

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));

      DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))

          strided_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));

        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;

    group_size = vec_num = 1;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform store. ncopies = %d", ncopies);

  dr_chain = VEC_alloc (tree, heap, group_size);
  oprnds = VEC_alloc (tree, heap, group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit strided access):

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

     VS2: &base + vec_size*1 = vx0
     VS3: &base + vec_size*2 = vx1
     VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

     VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
     VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_slp_defs (NULL_TREE, NULL_TREE, slp_node, &vec_oprnds,
              vec_oprnd = VEC_index (tree, vec_oprnds, 0);

              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                  VEC_quick_push (tree, dr_chain, vec_oprnd);
                  VEC_quick_push (tree, oprnds, vec_oprnd);
                  next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));

          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          dataref_ptr = vect_create_data_ref_ptr (first_stmt, vectype, NULL,
                                                  NULL_TREE, &dummy, gsi,
                                                  &ptr_incr, false, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);

          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the

             If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
              op = VEC_index (tree, oprnds, i);
              vect_is_simple_use (op, loop_vinfo, bb_vinfo, &def_stmt, &def,
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              VEC_replace (tree, dr_chain, i, vec_oprnd);
              VEC_replace (tree, oprnds, i, vec_oprnd);

          bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);

          result_chain = VEC_alloc (tree, heap, group_size);
          vect_permute_store_chain (dr_chain, group_size, stmt, gsi,

      next_stmt = first_stmt;
      for (i = 0; i < vec_num; i++)
          struct ptr_info_def *pi;

            /* Bump the vector pointer.  */
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,

            vec_oprnd = VEC_index (tree, vec_oprnds, i);
          else if (strided_store)
            /* For strided stores vectorized defs are interleaved in
               vect_permute_store_chain().  */
            vec_oprnd = VEC_index (tree, result_chain, i);

          data_ref = build2 (MEM_REF, TREE_TYPE (vec_oprnd), dataref_ptr,
                             build_int_cst (reference_alias_ptr_type
                                            (DR_REF (first_dr)), 0));
          pi = get_ptr_info (dataref_ptr);
          pi->align = TYPE_ALIGN_UNIT (vectype);
          if (aligned_access_p (first_dr))
          else if (DR_MISALIGNMENT (first_dr) == -1)
              TREE_TYPE (data_ref)
                = build_aligned_type (TREE_TYPE (data_ref),
                                      TYPE_ALIGN (TREE_TYPE (vectype)));
              pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));

              TREE_TYPE (data_ref)
                = build_aligned_type (TREE_TYPE (data_ref),
                                      TYPE_ALIGN (TREE_TYPE (vectype)));
              pi->misalign = DR_MISALIGNMENT (first_dr);

          /* Arguments are ready.  Create the new vector stmt.  */
          new_stmt = gimple_build_assign (data_ref, vec_oprnd);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          next_stmt = DR_GROUP_NEXT_DR (vinfo_for_stmt (next_stmt));

        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);

  VEC_free (tree, heap, dr_chain);
  VEC_free (tree, heap, oprnds);
  VEC_free (tree, heap, result_chain);
  VEC_free (tree, heap, vec_oprnds);
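
/* Illustration (hypothetical example): an interleaved store group of two,

       for (i = 0; i < n; i++)
         {
           p[2*i]     = a[i];
           p[2*i + 1] = b[i];
         }

   collects the vectorized defs of both group members in DR_CHAIN,
   interleaves them with vect_permute_store_chain, and emits one MEM_REF
   store per resulting vector when the last statement of the group is
   reached.  The arrays p, a and b are made up for this sketch.  */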
/* Given a vector type VECTYPE returns a builtin DECL to be used
   for vector permutation and stores a mask into *MASK that implements
   reversal of the vector elements.  If that is impossible to do
   returns NULL (and *MASK is unchanged).  */

perm_mask_for_reverse (tree vectype, tree *mask)
  tree mask_element_type, mask_type;
  tree mask_vec = NULL;

  if (!targetm.vectorize.builtin_vec_perm)

  builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
                                                     &mask_element_type);
  if (!builtin_decl || !mask_element_type)

  mask_type = get_vectype_for_scalar_type (mask_element_type);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);
      || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))

  for (i = 0; i < nunits; i++)
    mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
  mask_vec = build_vector (mask_type, mask_vec);

  if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))

  return builtin_decl;
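
/* Illustration (hypothetical): for a V4SI vector the loop above chains the
   element indices 0..3 with tree_cons (which prepends, so the finished
   constant lists them in reverse), and builtin_vec_perm_ok is then asked
   whether the target can really perform that permutation before the builtin
   decl is returned.  */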
/* Given a vector variable X, that was generated for the scalar LHS of
   STMT, generate instructions to reverse the vector elements of X,
   insert them at *GSI and return the permuted vector variable.  */

reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
  tree vectype = TREE_TYPE (x);
  tree mask_vec, builtin_decl;
  tree perm_dest, data_ref;

  builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);

  perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
  if (!useless_type_conversion_p (vectype,
                                  TREE_TYPE (TREE_TYPE (builtin_decl))))
      tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
      tem = make_ssa_name (tem, perm_stmt);
      gimple_call_set_lhs (perm_stmt, tem);
      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
      perm_stmt = gimple_build_assign (NULL_TREE,
                                       build1 (VIEW_CONVERT_EXPR,

  data_ref = make_ssa_name (perm_dest, perm_stmt);
  gimple_set_lhs (perm_stmt, data_ref);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
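
/* Illustration (hypothetical): the call generated above has the shape

       tmp_ = __builtin_vec_perm (x, x, <reversal mask>);

   followed, when the builtin's return type differs from VECTYPE, by a
   VIEW_CONVERT_EXPR copy into the real destination.  __builtin_vec_perm
   stands in for whatever decl the target hook returns.  */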
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                   slp_tree slp_node, slp_instance slp_node_instance)
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  enum machine_mode mode;
  gimple new_stmt = NULL;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int i, j, group_size;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  VEC(tree,heap) *dr_chain = NULL;
  bool strided_load = false;
  bool compute_in_loop = false;
  struct loop *at_loop;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  loop = LOOP_VINFO_LOOP (loop_vinfo);
  nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types in nested loop.");

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)

  /* Is vectorizable load? */
  if (!is_gimple_assign (stmt))

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR

  if (!STMT_VINFO_DATA_REF (stmt_info))

  negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
  if (negative && ncopies > 1)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "multiple types with negative step.");

  scalar_type = TREE_TYPE (DR_REF (dr));
  mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Aligned load, but unsupported type.");

  /* The vector component type needs to be trivially convertible to the
     scalar lhs.  This should always be the case.  */
  if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), TREE_TYPE (vectype)))
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "??? operands of different types");

  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
      strided_load = true;
      gcc_assert (! nested_in_vect_loop);

      first_stmt = DR_GROUP_FIRST_DR (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
          group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (!vect_strided_load_supported (vectype, group_size))

      gcc_assert (!strided_load);
      alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
      if (alignment_support_scheme != dr_aligned
          && alignment_support_scheme != dr_unaligned_supported)
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step but alignment required.");

      if (!perm_mask_for_reverse (vectype, NULL))
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step and reversing not supported.");

  if (!vec_stmt) /* transformation not required.  */
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, NULL);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform load. ncopies = %d", ncopies);

      first_stmt = DR_GROUP_FIRST_DR (stmt_info);
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
          strided_load = false;
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
        vec_num = group_size;

    group_size = vec_num = 1;

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize  */

  /* In case of interleaving (non-unit strided access):

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
     VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = p + indx * vectype_size;

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;

       p2 = p2 + indx * vectype_size
       vec_dest = realign_load (msq, lsq, realignment_token)  */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (loop && nested_in_vect_loop_p (loop, stmt)
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
      if (alignment_support_scheme == dr_explicit_realign_optimized)
          phi = SSA_NAME_DEF_STMT (msq);
          offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
      /* 1. Create the vector pointer update chain.  */
        dataref_ptr = vect_create_data_ref_ptr (first_stmt, vectype, at_loop,
                                                offset, &dummy, gsi,
                                                &ptr_incr, false, &inv_p);
        bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);

      if (strided_load || slp_perm)
        dr_chain = VEC_alloc (tree, heap, vec_num);

      for (i = 0; i < vec_num; i++)
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,

          /* 2. Create the vector-load in the loop.  */
          switch (alignment_support_scheme)
            case dr_unaligned_supported:
                struct ptr_info_def *pi;
                  = build2 (MEM_REF, vectype, dataref_ptr,
                            build_int_cst (reference_alias_ptr_type
                                           (DR_REF (first_dr)), 0));
                pi = get_ptr_info (dataref_ptr);
                pi->align = TYPE_ALIGN_UNIT (vectype);
                if (alignment_support_scheme == dr_aligned)
                    gcc_assert (aligned_access_p (first_dr));
                else if (DR_MISALIGNMENT (first_dr) == -1)
                    TREE_TYPE (data_ref
3886 && code
!= COMPONENT_REF
3887 && code
!= IMAGPART_EXPR
3888 && code
!= REALPART_EXPR
3892 if (!STMT_VINFO_DATA_REF (stmt_info
))
3895 negative
= tree_int_cst_compare (DR_STEP (dr
), size_zero_node
) < 0;
3896 if (negative
&& ncopies
> 1)
3898 if (vect_print_dump_info (REPORT_DETAILS
))
3899 fprintf (vect_dump
, "multiple types with negative step.");
3903 scalar_type
= TREE_TYPE (DR_REF (dr
));
3904 mode
= TYPE_MODE (vectype
);
3906 /* FORNOW. In some cases can vectorize even if data-type not supported
3907 (e.g. - data copies). */
3908 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
3910 if (vect_print_dump_info (REPORT_DETAILS
))
3911 fprintf (vect_dump
, "Aligned load, but unsupported type.");
3915 /* The vector component type needs to be trivially convertible to the
3916 scalar lhs. This should always be the case. */
3917 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest
), TREE_TYPE (vectype
)))
3919 if (vect_print_dump_info (REPORT_DETAILS
))
3920 fprintf (vect_dump
, "??? operands of different types");
3924 /* Check if the load is a part of an interleaving chain. */
3925 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
))
3927 strided_load
= true;
3929 gcc_assert (! nested_in_vect_loop
);
3931 first_stmt
= DR_GROUP_FIRST_DR (stmt_info
);
3932 if (!slp
&& !PURE_SLP_STMT (stmt_info
))
3934 group_size
= DR_GROUP_SIZE (vinfo_for_stmt (first_stmt
));
3935 if (!vect_strided_load_supported (vectype
, group_size
))
3942 gcc_assert (!strided_load
);
3943 alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
3944 if (alignment_support_scheme
!= dr_aligned
3945 && alignment_support_scheme
!= dr_unaligned_supported
)
3947 if (vect_print_dump_info (REPORT_DETAILS
))
3948 fprintf (vect_dump
, "negative step but alignment required.");
3951 if (!perm_mask_for_reverse (vectype
, NULL
))
3953 if (vect_print_dump_info (REPORT_DETAILS
))
3954 fprintf (vect_dump
, "negative step and reversing not supported.");
3959 if (!vec_stmt
) /* transformation not required. */
3961 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
3962 vect_model_load_cost (stmt_info
, ncopies
, NULL
);
3966 if (vect_print_dump_info (REPORT_DETAILS
))
3967 fprintf (vect_dump
, "transform load. ncopies = %d", ncopies
);
3973 first_stmt
= DR_GROUP_FIRST_DR (stmt_info
);
3974 /* Check if the chain of loads is already vectorized. */
3975 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt
)))
3977 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3980 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
3981 group_size
= DR_GROUP_SIZE (vinfo_for_stmt (first_stmt
));
3983 /* VEC_NUM is the number of vect stmts to be created for this group. */
3986 strided_load
= false;
3987 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
3988 if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance
))
3992 vec_num
= group_size
;
3998 group_size
= vec_num
= 1;
4001 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
4002 gcc_assert (alignment_support_scheme
);
4004 /* In case the vectorization factor (VF) is bigger than the number
4005 of elements that we can fit in a vectype (nunits), we have to generate
4006 more than one vector stmt - i.e - we need to "unroll" the
4007 vector stmt by a factor VF/nunits. In doing so, we record a pointer
4008 from one copy of the vector stmt to the next, in the field
4009 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
4010 stages to find the correct vector defs to be used when vectorizing
4011 stmts that use the defs of the current stmt. The example below
4012 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
4013 need to create 4 vectorized stmts):
4015 before vectorization:
4016 RELATED_STMT VEC_STMT
4020 step 1: vectorize stmt S1:
4021 We first create the vector stmt VS1_0, and, as usual, record a
4022 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
4023 Next, we create the vector stmt VS1_1, and record a pointer to
4024 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
4025 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
4027 RELATED_STMT VEC_STMT
4028 VS1_0: vx0 = memref0 VS1_1 -
4029 VS1_1: vx1 = memref1 VS1_2 -
4030 VS1_2: vx2 = memref2 VS1_3 -
4031 VS1_3: vx3 = memref3 - -
4032 S1: x = load - VS1_0
4035 See in documentation in vect_get_vec_def_for_stmt_copy for how the
4036 information we recorded in RELATED_STMT field is used to vectorize
  /* In case of interleaving (non-unit strided access):

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

        VS1: vx0 = &base
        VS2: vx1 = &base + vec_size*1
        VS3: vx3 = &base + vec_size*2
        VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

        VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
        VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
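  /* Added example (not part of the original source): for a two-way
     interleaved chain such as

         for (i = 0; i < n; i++)
           {
             a = in[2*i];        <-- first stmt of the chain
             b = in[2*i+1];
           }

     two contiguous V4SI loads vx0 = {a0,b0,a1,b1} and vx1 = {a2,b2,a3,b3}
     are created; the permutation stmts then produce {a0,a1,a2,a3} via
     VEC_EXTRACT_EVEN_EXPR and {b0,b1,b2,b3} via VEC_EXTRACT_ODD_EXPR,
     which become the vectorized defs of the two scalar loads.  */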
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         ...
         p = p + indx * vectype_size;
         vec_dest = *(p);
         ...

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         ...
         p2 = p2 + indx * vectype_size
         lsq = *(floor(p2))
         vec_dest = realign_load (msq, lsq, realignment_token)
         ...  */
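  /* Added note (not part of the original source): conceptually,
     realign_load (REALIGN_LOAD_EXPR) concatenates the two aligned vectors
     MSQ and LSQ and extracts the misaligned vector that straddles them,
     using REALIGNMENT_TOKEN (typically derived from the low address bits)
     to select the shift amount.  E.g., ignoring endianness details, for
     V4SI data starting one element past an aligned boundary the result is
     { msq[1], msq[2], msq[3], lsq[0] }.  */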
  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
  if (loop && nested_in_vect_loop_p (loop, stmt)
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = SSA_NAME_DEF_STMT (msq);
          offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
        }
    }

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector pointer update chain.  */
      if (j == 0)
        dataref_ptr = vect_create_data_ref_ptr (first_stmt, vectype, at_loop,
                                                offset, &dummy, gsi,
                                                &ptr_incr, false, &inv_p);
      else
        dataref_ptr =
          bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, NULL_TREE);

      if (strided_load || slp_perm)
        dr_chain = VEC_alloc (tree, heap, vec_num);

      for (i = 0; i < vec_num; i++)
        {
          if (i > 0)
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           NULL_TREE);
          /* 2. Create the vector-load in the loop.  */
          switch (alignment_support_scheme)
            {
            case dr_aligned:
            case dr_unaligned_supported:
              {
                struct ptr_info_def *pi;
                data_ref
                  = build2 (MEM_REF, vectype, dataref_ptr,
                            build_int_cst (reference_alias_ptr_type
                                           (DR_REF (first_dr)), 0));
                pi = get_ptr_info (dataref_ptr);
                pi->align = TYPE_ALIGN_UNIT (vectype);
                if (alignment_support_scheme == dr_aligned)
                  {
                    gcc_assert (aligned_access_p (first_dr));
                  }
                else if (DR_MISALIGNMENT (first_dr) == -1)
                  {
                    TREE_TYPE (data_ref)
                      = build_aligned_type (TREE_TYPE (data_ref),
                                            TYPE_ALIGN (TREE_TYPE (vectype)));
                    pi->align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
                  }
                else
                  {
                    TREE_TYPE (data_ref)
                      = build_aligned_type (TREE_TYPE (data_ref),
                                            TYPE_ALIGN (TREE_TYPE (vectype)));
                    pi->misalign = DR_MISALIGNMENT (first_dr);
                  }
                break;
              }

            case dr_explicit_realign:
              {
                tree vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

                if (compute_in_loop)
                  msq = vect_setup_realignment (first_stmt, gsi,
                                                &realignment_token,
                                                dr_explicit_realign,
                                                dataref_ptr, NULL);

                new_stmt = gimple_build_assign_with_ops
                             (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                              build_int_cst
                                (TREE_TYPE (dataref_ptr),
                                 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                gimple_assign_set_lhs (new_stmt, ptr);
                vect_finish_stmt_generation (stmt, new_stmt, gsi);
                data_ref
                  = build2 (MEM_REF, vectype, ptr,
                            build_int_cst (reference_alias_ptr_type
                                           (DR_REF (first_dr)), 0));
                vec_dest = vect_create_destination_var (scalar_dest, vectype);
                new_stmt = gimple_build_assign (vec_dest, data_ref);
                new_temp = make_ssa_name (vec_dest, new_stmt);
                gimple_assign_set_lhs (new_stmt, new_temp);
                gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                vect_finish_stmt_generation (stmt, new_stmt, gsi);

                bump = size_binop (MULT_EXPR, vs_minus_1,
                                   TYPE_SIZE_UNIT (scalar_type));
                ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                new_stmt = gimple_build_assign_with_ops
                             (BIT_AND_EXPR, NULL_TREE, ptr,
                              build_int_cst
                                (TREE_TYPE (ptr),
                                 -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                gimple_assign_set_lhs (new_stmt, ptr);
                vect_finish_stmt_generation (stmt, new_stmt, gsi);
                data_ref
                  = build2 (MEM_REF, vectype, ptr,
                            build_int_cst (reference_alias_ptr_type
                                           (DR_REF (first_dr)), 0));
                break;
              }

            case dr_explicit_realign_optimized:
              new_stmt = gimple_build_assign_with_ops
                           (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                            build_int_cst
                              (TREE_TYPE (dataref_ptr),
                               -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
              new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              data_ref
                = build2 (MEM_REF, vectype, new_temp,
                          build_int_cst (reference_alias_ptr_type
                                         (DR_REF (first_dr)), 0));
              break;

            default:
              gcc_unreachable ();
            }

          vec_dest = vect_create_destination_var (scalar_dest, vectype);
          new_stmt = gimple_build_assign (vec_dest, data_ref);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);
          /* 3. Handle explicit realignment if necessary/supported.  Create in
             loop: vec_dest = realign_load (msq, lsq, realignment_token)  */
          if (alignment_support_scheme == dr_explicit_realign_optimized
              || alignment_support_scheme == dr_explicit_realign)
            {
              lsq = gimple_assign_lhs (new_stmt);
              if (!realignment_token)
                realignment_token = dataref_ptr;
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt
                = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR, vec_dest,
                                                 msq, lsq, realignment_token);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (alignment_support_scheme == dr_explicit_realign_optimized)
                {
                  if (i == vec_num - 1 && j == ncopies - 1)
                    add_phi_arg (phi, lsq, loop_latch_edge (containing_loop),
                                 UNKNOWN_LOCATION);
                  msq = lsq;
                }
            }

          /* 4. Handle invariant-load.  */
          if (inv_p && !bb_vinfo)
            {
              gcc_assert (!strided_load);
              gcc_assert (nested_in_vect_loop_p (loop, stmt));
              if (j == 0)
                {
                  tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);

                  /* CHECKME: bitpos depends on endianess?  */
                  bitpos = bitsize_zero_node;
                  vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
                                    bitsize, bitpos);
                  vec_dest =
                    vect_create_destination_var (scalar_dest, NULL_TREE);
                  new_stmt = gimple_build_assign (vec_dest, vec_inv);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  for (k = nunits - 1; k >= 0; --k)
                    t = tree_cons (NULL_TREE, new_temp, t);
                  /* FIXME: use build_constructor directly.  */
                  vec_inv = build_constructor_from_list (vectype, t);
                  new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }
              else
                gcc_unreachable (); /* FORNOW. */
            }

          if (negative)
            {
              new_temp = reverse_vec_elements (new_temp, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
            }

          /* Collect vector loads and later create their permutation in
             vect_transform_strided_load ().  */
          if (strided_load || slp_perm)
            VEC_quick_push (tree, dr_chain, new_temp);

          /* Store vector loads in the corresponding SLP_NODE.  */
          if (slp && !slp_perm)
            VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              VEC_free (tree, heap, dr_chain);
              return false;
            }
        }
      else if (strided_load)
        {
          vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
        }
      else
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      VEC_free (tree, heap, dr_chain);
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
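/* Added example (not part of the original source): for a loop conditional
   like

       x_4 = a_5 < n_3 ? b_6 : c_7;

   the condition a_5 < n_3 is "simple" when both a_5 and n_3 are either
   SSA names whose definitions vect_is_simple_use can analyze, or
   integer/real/fixed-point constants, which is exactly what the checks
   below verify.  */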
static bool
vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  return true;
}
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
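/* Added example (not part of the original source): a conditional modify
   expression such as

       x_4 = a_1 < b_2 ? c_3 : d_9;

   (a GIMPLE_ASSIGN whose rhs is a COND_EXPR) is replaced, for V4SI
   operands, by something like

       vx_4 = VEC_COND_EXPR <va_1 < vb_2, vc_3, vd_9>;

   where each element of vx_4 is taken from vc_3 or vd_9 according to the
   element-wise comparison result.  */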
bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, tree reduc_def, int reduc_index)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree op = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  tree def;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum machine_mode vec_mode;
  enum vect_def_type dt, dts[4];
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  gimple new_stmt;
  int j;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "value used after loop.");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  cond_expr = TREE_OPERAND (op, 0);
  then_clause = TREE_OPERAND (op, 1);
  else_clause = TREE_OPERAND (op, 2);

  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
    return false;

  /* We do not handle two different vector types for the condition
     and the values.  */
  if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
                           TREE_TYPE (vectype)))
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
                               &then_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
           && TREE_CODE (then_clause) != REAL_CST
           && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
                               &else_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
           && TREE_CODE (else_clause) != REAL_CST
           && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  vec_mode = TYPE_MODE (vectype);

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
    }

  /* Transform.  */

  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0)
        {
          gimple gtemp;
          vec_cond_lhs =
            vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                          stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
                              NULL, &gtemp, &def, &dts[0]);
          vec_cond_rhs =
            vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                          stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
                              NULL, &gtemp, &def, &dts[1]);
          if (reduc_index == 1)
            vec_then_clause = reduc_def;
          else
            {
              vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                              stmt, NULL);
              vect_is_simple_use (then_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[2]);
            }
          if (reduc_index == 2)
            vec_else_clause = reduc_def;
          else
            {
              vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                              stmt, NULL);
              vect_is_simple_use (else_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[3]);
            }
        }
      else
        {
          vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
          vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_then_clause);
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      vec_compare = build2 (TREE_CODE (cond_expr), vectype,
                            vec_cond_lhs, vec_cond_rhs);
      vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                              vec_compare, vec_then_clause, vec_else_clause);

      new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "==> examining statement: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: stmt has volatile operands");
      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "irrelevant.");
      return true;
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo && (relevance == vect_used_in_outer
                                || relevance == vect_used_in_outer_by_reduction
                                || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "get vectype for scalar type: ");
          print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            {
              fprintf (vect_dump, "not SLPed: unsupported data-type ");
              print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
            }
          return false;
        }

      if (vect_print_dump_info (REPORT_DETAILS))
        {
          fprintf (vect_dump, "vectype: ");
          print_generic_expr (vect_dump, vectype, TDF_SLIM);
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
      *need_to_vectorize = true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
          || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
          || vectorizable_conversion (stmt, NULL, NULL, NULL)
          || vectorizable_shift (stmt, NULL, NULL, NULL)
          || vectorizable_operation (stmt, NULL, NULL, NULL)
          || vectorizable_assignment (stmt, NULL, NULL, NULL)
          || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
          || vectorizable_call (stmt, NULL, NULL)
          || vectorizable_store (stmt, NULL, NULL, NULL)
          || vectorizable_reduction (stmt, NULL, NULL, NULL)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
  else if (bb_vinfo)
    ok = (vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_store (stmt, NULL, NULL, node));

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized: relevant stmt not ");
          fprintf (vect_dump, "supported: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }
      return false;
    }

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized: live stmt not ");
          fprintf (vect_dump, "supported: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }
      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at GSI.  */

bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
                     bool *strided_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
  bool done;

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
      done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
      break;

    case type_promotion_vec_info_type:
      done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
      break;

    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their stmt_vec_info shouldn't be freed
             meanwhile.  */
          *strided_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
      break;

    case call_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_call (stmt, gsi, &vec_stmt);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "stmt not supported.");
          gcc_unreachable ();
        }
    }

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
               vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    done = vectorizable_live_operation (stmt, gsi, &vec_stmt);

  if (vec_stmt)
    {
      STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
      orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
      if (orig_stmt_in_pattern)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
          /* STMT was inserted by the vectorizer to replace a computation
             idiom.  ORIG_STMT_IN_PATTERN is a stmt in the original sequence
             that computed this idiom.  We need to record a pointer to
             VEC_STMT in the stmt_info of ORIG_STMT_IN_PATTERN.  See more
             details in the documentation of vect_pattern_recog.  */
          if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
            {
              gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
                          == orig_scalar_stmt);
              STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
            }
        }
    }

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      gsi_remove (&next_si, true);
      tmp = DR_GROUP_NEXT_DR (vinfo_for_stmt (next));
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
                   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
  STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
  STMT_SLP_TYPE (res) = loop_vect;
  DR_GROUP_FIRST_DR (res) = NULL;
  DR_GROUP_NEXT_DR (res) = NULL;
  DR_GROUP_SIZE (res) = 0;
  DR_GROUP_STORE_COUNT (res) = 0;
  DR_GROUP_GAP (res) = 0;
  DR_GROUP_SAME_DR_STMT (res) = NULL;
  DR_GROUP_READ_WRITE_DEPENDENCE (res) = false;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec);
  stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  gcc_assert (stmt_vec_info_vec);
  VEC_free (vec_void_p, heap, stmt_vec_info_vec);
}


/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */
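/* Added example (not part of the original source): on a target whose
   preferred SIMD width is 16 bytes, a 4-byte 'int' scalar type yields a
   V4SI vector type (nunits == 4); requesting SIZE == 32 on a target that
   also supports 32-byte vectors would yield V8SI instead.  A scalar type
   whose mode is neither MODE_INT nor MODE_FLOAT, or whose precision does
   not match its mode (e.g. some bool/enum types), yields no vector type,
   as the checks below show.  */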
tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  enum machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    return NULL_TREE;

  /* If we'd build a vector type of elements whose mode precision doesn't
     match their types precision we'll get mismatched types on vector
     extracts via BIT_FIELD_REFs.  This effectively means we disable
     vectorization of bool and/or enum types in some languages.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;

  vectype = build_vector_type (scalar_type, nunits);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "get vectype with %d units of type ", nunits);
      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vectype: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
    }

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "mode not supported by target.");
      return NULL_TREE;
    }

  return vectype;
}

unsigned int current_vector_size;
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of a stmt in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
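/* Added example (not part of the original source): when vectorizing

       for (i = 0; i < n; i++)
         a[i] = b[i] * c;

   the operand 'c' (defined before the loop) is classified as
   vect_external_def, an integer constant operand would be
   vect_constant_def, and an SSA name defined by a stmt of the current
   iteration (e.g. the value loaded from b[i]) is vect_internal_def; all
   three are "simple".  In basic-block vectorization an operand defined by
   a PHI node is likewise treated as external, as the code below shows.  */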
bool
vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
                    bb_vec_info bb_vinfo, gimple *def_stmt,
                    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_is_simple_use: operand ");
      print_generic_expr (vect_dump, operand, TDF_SLIM);
    }

  if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) == PAREN_EXPR)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "non-associatable copy.");
      operand = TREE_OPERAND (operand, 0);
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not ssa-name.");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no def_stmt.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "def_stmt: ");
      print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
    }

  /* Empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN).  */
  if (gimple_nop_p (*def_stmt))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  bb = gimple_bb (*def_stmt);

  if ((loop && !flow_bb_inside_loop_p (loop, bb))
      || (!loop && bb != BB_VINFO_BB (bb_vinfo))
      || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
    *dt = vect_external_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unsupported pattern.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "type of def: %d.", *dt);

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      break;

    default:
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unsupported defining stmt: ");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   use in the stmt.  */

bool
vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
                      bb_vec_info bb_vinfo, gimple *def_stmt,
                      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
      if (STMT_VINFO_IN_PATTERN_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
     when vectorizing the operation, if available.  In this case,
     CODE1 and CODE2 are CALL_EXPR.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */
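/* Added example (not part of the original source): widening a V16QI (char)
   input to a V4SI (int) result is a two-step promotion via V8HI (short).
   On a target that provides the corresponding unpack optabs for both
   steps, this function would return CODE1/CODE2 = VEC_UNPACK_{HI,LO}_EXPR,
   MULTI_STEP_CVT = 1 and INTERM_TYPES = { V8HI }, and the caller emits the
   intermediate unpack stmts itself.  */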
bool
supportable_widening_operation (enum tree_code code, gimple stmt,
                                tree vectype_out, tree vectype_in,
                                tree *decl1, tree *decl2,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                VEC (tree, heap) **interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;

  /* The result of a vectorized widening operation usually requires two vectors
     (because the widened results do not fit int one vector). The generated
     vector results would normally be expected to be generated in the same
     order as in the original scalar computation, i.e. if 8 results are
     generated in each vector iteration, they are to be organized as follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening operation is
     used in a reduction computation only, the order doesn't matter (because
     when vectorizing a reduction we change the order of the computation).
     Some targets can take advantage of this and generate more efficient code.
     For example, targets like Altivec, that support widen_mult using a sequence
     of {mult_even,mult_odd} generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow to change the order
     of the computation in the inner-loop during outer-loop vectorization.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_WIDEN_MULT_HI_EXPR;
          c2 = VEC_WIDEN_MULT_LO_EXPR;
        }
      else
        {
          c2 = VEC_WIDEN_MULT_HI_EXPR;
          c1 = VEC_WIDEN_MULT_LO_EXPR;
        }
      break;

    CASE_CONVERT:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_HI_EXPR;
          c2 = VEC_UNPACK_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_HI_EXPR;
          c1 = VEC_UNPACK_LO_EXPR;
        }
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_FLOAT_HI_EXPR;
          c2 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_FLOAT_HI_EXPR;
          c1 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    {
      tree prev_type = vectype, intermediate_type;
      enum machine_mode intermediate_mode, prev_mode = vec_mode;
      optab optab3, optab4;

      if (!CONVERT_EXPR_CODE_P (code))
        return false;

      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
         intermediate steps in promotion sequence.  We try
         MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
         not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type
            = lang_hooks.types.type_for_mode (intermediate_mode,
                                              TYPE_UNSIGNED (prev_type));
          optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
          optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

          if (!optab3 || !optab4
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode2 = optab_handler (optab2, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode2].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (optab3, intermediate_mode))
                  == CODE_FOR_nothing)
              || ((icode2 = optab_handler (optab4, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
              && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  *code2 = c2;
  return true;
}
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */
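/* Added example (not part of the original source): narrowing a V4SI (int)
   input to a V16QI (char) result needs an intermediate V8HI (short) step.
   When the target implements VEC_PACK_TRUNC_EXPR for both int->short and
   short->char, this function returns CODE1 = VEC_PACK_TRUNC_EXPR,
   MULTI_STEP_CVT = 1 and INTERM_TYPES = { V8HI }; the caller then emits
   two levels of pack stmts.  */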
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 VEC (tree, heap) **interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  int i;

  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    {
      enum machine_mode intermediate_mode, prev_mode = vec_mode;

      *code1 = c1;
      prev_type = vectype;
      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
         intermediate steps in promotion sequence.  We try
         MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
         not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type
            = lang_hooks.types.type_for_mode (intermediate_mode,
                                              TYPE_UNSIGNED (prev_type));
          interm_optab = optab_for_tree_code (c1, intermediate_type,
                                              optab_default);
          if (!interm_optab
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (interm_optab, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  return true;
}