/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfglayout.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"
#include "langhooks.h"
/* Return a variable of type ELEM_TYPE[NELEMS].  */

create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);

  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
  mark_symbols_for_renaming (new_stmt);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  struct ptr_info_def *pi;
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  pi = get_ptr_info (ptr);
  pi->align = TYPE_ALIGN_UNIT (type);
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

vect_mark_relevant (VEC(gimple,heap) **worklist, gimple stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "mark relevant %d, live %d.", relevant, live_p);

  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "last stmt in pattern. don't mark relevant/live.");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "already marked relevant/live.");
      return;
    }

  VEC_safe_push (gimple, heap, *worklist, stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (other than the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
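/* For illustration (example added for this writeup, not from the original
   sources): in a loop such as

       for (i = 0; i < n; i++)
         {
           a[i] = b[i] + c[i];   <-- alters memory (has a vdef): relevant
           s = s + b[i];         <-- s is used after the loop: relevant/live
           t = i * 4;            <-- only feeds address computation: left
                                     for process_use to decide
         }

   the first two stmts satisfy the criteria above, while a stmt that only
   feeds addressing is handled separately below.  */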
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  imm_use_iterator imm_iter;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt))
      {
	if (vect_print_dump_info (REPORT_DETAILS))
	  fprintf (vect_dump, "vec_stmt_relevant_p: stmt has vdefs.");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (vect_print_dump_info (REPORT_DETAILS))
		fprintf (vect_dump, "vec_stmt_relevant_p: used out of loop.");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p.

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */
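  /* For illustration (example added for this writeup, not in the original
     comment): given "a[i] = x", the use of 'i' appears only inside the
     ARRAY_REF and is therefore an indexing-only use, while the use of 'x'
     is a real operand of the store and makes this function return true.  */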
  if (!gimple_assign_copy_p (stmt))
    return false;
  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant

   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
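/* For illustration (example added for this writeup, not part of the original
   comment): with an outer loop over i and an inner loop over j, an outer-loop
   stmt "x = a[i]" whose result is only used by inner-loop stmts is the
   "case 3a" situation handled below, while an inner-loop stmt whose result is
   consumed by the outer loop (e.g. a nested reduction) is "case 3b".  */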
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, VEC(gimple,heap) **worklist)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
	fprintf (vect_dump, "not vectorized: unsupported use in stmt.");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "def_stmt is out of loop.");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "reduc-stmt defining reduc-phi in the same nest.");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "outer-loop def-stmt defining inner-loop stmt.");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "inner-loop def-stmt defining outer-loop stmt.");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
		      || STMT_VINFO_DEF_TYPE (stmt_vinfo)
			 == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  VEC(gimple,heap) *worklist;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  stmt_vec_info stmt_vinfo;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "=== vect_mark_stmts_to_be_vectorized ===");

  worklist = VEC_alloc (gimple, heap, 64);

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];

      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (vect_print_dump_info (REPORT_DETAILS))
	    {
	      fprintf (vect_dump, "init: phi relevant? ");
	      print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (vect_print_dump_info (REPORT_DETAILS))
	    {
	      fprintf (vect_dump, "init: stmt relevant? ");
	      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (VEC_length (gimple, worklist) > 0)
    {
      stmt = VEC_pop (gimple, worklist);
      if (vect_print_dump_info (REPORT_DETAILS))
	{
	  fprintf (vect_dump, "worklist: examine stmt: ");
	  print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines
	 it (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	   live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	   relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a
	 reduction variable; in this case we set the liveness/relevance
	 as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */
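      /* For illustration (example added for this writeup, not in the
	 original): in "sum += a[i]" the partial sums only feed the reduction,
	 so their lane order may be changed freely; if sum were also stored to
	 memory every iteration, the defining stmts would be relevant to a
	 regular computation as well and the order would have to be kept.  */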
      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (vect_print_dump_info (REPORT_DETAILS))
		fprintf (vect_dump, "unsupported use of reduction.");

	      VEC_free (gimple, heap, worklist);
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (vect_print_dump_info (REPORT_DETAILS))
		fprintf (vect_dump, "unsupported use of nested cycle.");

	      VEC_free (gimple, heap, worklist);
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (vect_print_dump_info (REPORT_DETAILS))
		fprintf (vect_dump, "unsupported use of double reduction.");

	      VEC_free (gimple, heap, worklist);
	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	{
	  tree op = USE_FROM_PTR (use_p);
	  if (!process_use (stmt, op, loop_vinfo, live_p, relevant, &worklist))
	    {
	      VEC_free (gimple, heap, worklist);
	      return false;
	    }
	}
    } /* while worklist */

  VEC_free (gimple, heap, worklist);
  return true;
}
/* Get cost by calling cost target builtin.  */

int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  tree dummy_type = NULL;
  int dummy = 0;

  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
						       dummy_type, dummy);
}
/* Get cost for STMT.  */

cost_for_stmt (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case load_vec_info_type:
      return vect_get_stmt_cost (scalar_load);
    case store_vec_info_type:
      return vect_get_stmt_cost (scalar_store);
    case op_vec_info_type:
    case condition_vec_info_type:
    case assignment_vec_info_type:
    case reduc_vec_info_type:
    case induc_vec_info_type:
    case type_promotion_vec_info_type:
    case type_demotion_vec_info_type:
    case type_conversion_vec_info_type:
    case call_vec_info_type:
      return vect_get_stmt_cost (scalar_stmt);
    case undef_vec_info_type:
    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */
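/* For illustration (numbers invented for this note, not from the sources):
   with ncopies == 2 and a target where vect_get_stmt_cost (vector_stmt)
   returns 1, the inside-of-loop cost computed below is 2, and each constant
   or invariant operand adds one more vector_stmt to the outside-of-loop cost
   for building the vector of duplicates.  */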
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt, slp_tree slp_node)
{
  int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  inside_cost = ncopies * vect_get_stmt_cost (vector_stmt);

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    {
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
	outside_cost += vect_get_stmt_cost (vector_stmt);
    }

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_simple_cost: inside_cost = %d, "
	     "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Function vect_cost_strided_group_size

   For strided load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */
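/* For illustration (example added for this writeup, not in the original):
   for an interleaved group of four stores a[4*i], a[4*i+1], a[4*i+2],
   a[4*i+3], the call on the first stmt of the group returns 4 and the calls
   on the remaining three return 1, so the group overhead is charged once.  */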
vect_cost_strided_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}

/* Function vect_model_store_cost

   Models cost for stores.  In the case of strided accesses, one access
   has the overhead of the strided access attributed to it.  */

vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node)
{
  unsigned int inside_cost = 0, outside_cost = 0;
  struct data_reference *first_dr;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (dt == vect_constant_def || dt == vect_external_def)
    outside_cost = vect_get_stmt_cost (scalar_to_vec);

  /* Strided access?  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      if (slp_node)
	first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_strided_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    first_dr = STMT_VINFO_DATA_REF (stmt_info);

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a strided
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
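  /* For illustration (worked example added here, not in the original): with
     group_size == 4 and ncopies == 1 the formula below charges
     1 * log2(4) * 4 = 8 vector_stmt operations for the interleaving permutes,
     in addition to the cost of the 4 vector stores themselves.  */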
  if (!store_lanes_p && group_size > 1)
    {
      /* Uses a high and low interleave operation for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
		    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
	fprintf (vect_dump, "vect_model_store_cost: strided group_size = %d .",
		 group_size);
    }

  /* Costs of the stores.  */
  vect_get_store_cost (first_dr, ncopies, &inside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_store_cost: inside_cost = %d, "
	     "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */

vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += ncopies * vect_get_stmt_cost (vector_store);

	if (vect_print_dump_info (REPORT_COST))
	  fprintf (vect_dump, "vect_model_store_cost: aligned.");

	break;
      }

    case dr_unaligned_supported:
      {
	gimple stmt = DR_STMT (dr);
	stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
	tree vectype = STMT_VINFO_VECTYPE (stmt_info);

	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += ncopies
	  * targetm.vectorize.builtin_vectorization_cost (unaligned_store,
						 vectype, DR_MISALIGNMENT (dr));

	if (vect_print_dump_info (REPORT_COST))
	  fprintf (vect_dump, "vect_model_store_cost: unaligned supported by "
		   "hardware.");

	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of strided accesses, the last access
   has the overhead of the strided access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p,
		      slp_tree slp_node)
{
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, outside_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Strided accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_strided_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a strided access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a strided
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
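  /* For illustration (worked example added here, not in the original): a
     strided load group of size 4 with ncopies == 1 is charged
     1 * log2(4) * 4 = 8 vector_stmt operations for the even/odd extracts
     below, in addition to the cost of the 4 vector loads themselves.  */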
  if (!load_lanes_p && group_size > 1)
    {
      /* Uses an even and odd extract operations for each needed permute.  */
      inside_cost = ncopies * exact_log2 (group_size) * group_size
		    * vect_get_stmt_cost (vector_stmt);

      if (vect_print_dump_info (REPORT_COST))
	fprintf (vect_dump, "vect_model_load_cost: strided group_size = %d .",
		 group_size);
    }

  /* The loads themselves.  */
  vect_get_load_cost (first_dr, ncopies,
		      ((!STMT_VINFO_STRIDED_ACCESS (stmt_info))
		       || group_size > 1 || slp_node),
		      &inside_cost, &outside_cost);

  if (vect_print_dump_info (REPORT_COST))
    fprintf (vect_dump, "vect_model_load_cost: inside_cost = %d, "
	     "outside_cost = %d .", inside_cost, outside_cost);

  /* Set the costs either in STMT_INFO or SLP_NODE (if exists).  */
  stmt_vinfo_set_inside_of_loop_cost (stmt_info, slp_node, inside_cost);
  stmt_vinfo_set_outside_of_loop_cost (stmt_info, slp_node, outside_cost);
}
/* Calculate cost of DR's memory access.  */

vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *outside_cost)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += ncopies * vect_get_stmt_cost (vector_load);

	if (vect_print_dump_info (REPORT_COST))
	  fprintf (vect_dump, "vect_model_load_cost: aligned.");

	break;
      }
    case dr_unaligned_supported:
      {
	gimple stmt = DR_STMT (dr);
	stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
	tree vectype = STMT_VINFO_VECTYPE (stmt_info);

	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += ncopies
	  * targetm.vectorize.builtin_vectorization_cost (unaligned_load,
						 vectype, DR_MISALIGNMENT (dr));
	if (vect_print_dump_info (REPORT_COST))
	  fprintf (vect_dump, "vect_model_load_cost: unaligned supported by "
		   "hardware.");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += ncopies * (2 * vect_get_stmt_cost (vector_load)
				   + vect_get_stmt_cost (vector_stmt));

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   outside costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += vect_get_stmt_cost (vector_stmt);

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (vect_print_dump_info (REPORT_COST))
	  fprintf (vect_dump, "vect_model_load_cost: unaligned software "
		   "pipelined.");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide strided
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost)
	  {
	    *outside_cost = 2 * vect_get_stmt_cost (vector_stmt);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *outside_cost += vect_get_stmt_cost (vector_stmt);
	  }

	*inside_cost += ncopies * (vect_get_stmt_cost (vector_load)
				   + vect_get_stmt_cost (vector_stmt));
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new vector variable with
   the vector elements of VECTOR_VAR.  Place the initialization at BSI if it
   is not NULL.  Otherwise, place the initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
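/* For illustration (example added for this writeup, not part of the original
   comment): for a scalar constant 5 used in a V4SI operation, this emits
   "cst_1 = {5, 5, 5, 5}" either right before the statement being vectorized
   or in the loop preheader, and the SSA name defined by that assignment is
   what the caller then uses as the vector operand.  */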
vect_init_vector (gimple stmt, tree vector_var, tree vector_type,
		  gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);

  new_var = vect_get_new_vect_var (vector_type, vect_simple_var, "cst_");
  add_referenced_var (new_var);
  init_stmt = gimple_build_assign (new_var, vector_var);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);

  if (gsi)
    vect_finish_stmt_generation (stmt, init_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, init_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, init_stmt, GSI_SAME_STMT);
	}
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "created new init_stmt: ");
      print_gimple_stmt (vect_dump, init_stmt, 0, TDF_SLIM);
    }

  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will
   be used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector
   def needs to be introduced.  */
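/* For illustration (example added for this writeup, not part of the original
   comment): when vectorizing "x = a[i] + c" with a loop-invariant c, the
   operand coming from a[i] resolves to the lhs of the already-vectorized
   load (case 3 below), while c falls under case 2 and triggers creation of a
   "vec_inv = {c, c, ..., c}" def in the preheader.  */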
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_get_vec_def_for_operand: ");
      print_generic_expr (vect_dump, op, TDF_SLIM);
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def,
				      &dt);
  gcc_assert (is_simple_use);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "def =  ");
      print_generic_expr (vect_dump, def, TDF_SLIM);
      fprintf (vect_dump, "  def_stmt =  ");
      print_gimple_stmt (vect_dump, def_stmt, 0, TDF_SLIM);
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	nunits = TYPE_VECTOR_SUBPARTS (vector_type);

	/* Create 'vect_cst_ = {cst,cst,...,cst}'  */
	if (vect_print_dump_info (REPORT_DETAILS))
	  fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);

	vec_cst = build_vector_from_val (vector_type, op);
	return vect_init_vector (stmt, vec_cst, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
	gcc_assert (vector_type);
	nunits = TYPE_VECTOR_SUBPARTS (vector_type);

	/* Create 'vec_inv = {inv,inv,..,inv}'  */
	if (vect_print_dump_info (REPORT_DETAILS))
	  fprintf (vect_dump, "Create vector_inv.");

	for (i = nunits - 1; i >= 0; --i)
	  t = tree_cons (NULL_TREE, def, t);

	/* FIXME: use build_constructor directly.  */
	vec_inv = build_constructor_from_list (vector_type, t);
	return vect_init_vector (stmt, vec_inv, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
	*scalar_def = NULL/* FIXME tuples: def_stmt*/;

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
	loop = (gimple_bb (def_stmt))->loop_father;

	/* Get the def before the loop.  */
	op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
	return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT
   field of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of
   the vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 VEC(tree,heap) **vec_oprnds0,
				 VEC(tree,heap) **vec_oprnds1)
{
  tree vec_oprnd = VEC_pop (tree, *vec_oprnds0);

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

  if (vec_oprnds1 && *vec_oprnds1)
    {
      vec_oprnd = VEC_pop (tree, *vec_oprnds1);
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1, or SLP_NODE if it is not
   NULL.  */

vect_get_vec_defs (tree op0, tree op1, gimple stmt,
		   VEC(tree,heap) **vec_oprnds0, VEC(tree,heap) **vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    vect_get_slp_defs (op0, op1, slp_node, vec_oprnds0, vec_oprnds1, -1);
  else
    {
      tree vec_oprnd;

      *vec_oprnds0 = VEC_alloc (tree, heap, 1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      VEC_quick_push (tree, *vec_oprnds0, vec_oprnd);

      if (op1)
	{
	  *vec_oprnds1 = VEC_alloc (tree, heap, 1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
	  VEC_quick_push (tree, *vec_oprnds1, vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
						   bb_vinfo));

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "add new stmt: ");
      print_gimple_stmt (vect_dump, vec_stmt, 0, TDF_SLIM);
    }

  gimple_set_location (vec_stmt, gimple_location (gsi_stmt (*gsi)));
}
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

vectorizable_function (gimple call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (fndecl == NULL_TREE
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}
/* Function vectorizable_call.

   Check if STMT performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
{
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype_out, vectype_in;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  VEC(tree, heap) *vargs = NULL;
  enum { NARROW, NONE, WIDEN } modifier;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  if (TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  if (stmt_can_throw_internal (stmt))
    return false;

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  for (i = 0; i < nargs; i++)
    {
      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    fprintf (vect_dump, "argument types differ.");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, loop_vinfo, NULL,
				 &def_stmt, &def, &dt[i], &opvectype))
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    fprintf (vect_dump, "use not simple.");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (vect_print_dump_info (REPORT_DETAILS))
	    fprintf (vect_dump, "argument vector types differ.");
	  return false;
	}
    }

  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	{
	  fprintf (vect_dump, "no vectype for scalar type ");
	  print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "function is not vectorizable.");

      return false;
    }

  gcc_assert (!gimple_vuse (stmt));

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "=== vectorizable_call ===");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL);
      return true;
    }
  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform operation.");

  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs = VEC_alloc (tree, heap, nargs);
	  else
	    VEC_truncate (tree, vargs, 0);

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt, NULL);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      VEC_quick_push (tree, vargs, vec_oprnd0);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  mark_symbols_for_renaming (new_stmt);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs = VEC_alloc (tree, heap, nargs * 2);
	  else
	    VEC_truncate (tree, vargs, 0);

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt, NULL);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      VEC_quick_push (tree, vargs, vec_oprnd0);
	      VEC_quick_push (tree, vargs, vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  mark_symbols_for_renaming (new_stmt);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  VEC_free (tree, heap, vargs);

  /* Update the exception handling table with the vector stmt if necessary.  */
  if (maybe_clean_or_replace_eh_stmt (stmt, *vec_stmt))
    gimple_purge_dead_eh_edges (gimple_bb (stmt));

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  type = TREE_TYPE (scalar_dest);
  new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
				  build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);
  SSA_NAME_DEF_STMT (gimple_assign_lhs (new_stmt)) = new_stmt;

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple stmt)
{
  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign_with_ops (code, vec_dest, vec_oprnd0,
					       vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }

  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
			 gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  tree vectype_out, vectype_in;
  enum { NARROW, NONE, WIDEN } modifier;
  VEC(tree,heap) *vec_oprnds0 = NULL;
  VEC(tree,heap) *dummy = NULL;

  /* Is STMT a vectorizable conversion?   */

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
    return false;

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);
  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, loop_vinfo, NULL,
			     &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "use not simple.");
      return false;
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	{
	  fprintf (vect_dump, "no vectype for scalar type ");
	  print_generic_expr (vect_dump, rhs_type, TDF_SLIM);
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  if ((modifier == NONE
       && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
      || (modifier == WIDEN
	  && !supportable_widening_operation (code, stmt,
					      vectype_out, vectype_in,
					      &decl1, &decl2,
					      &code1, &code2,
					      &dummy_int, &dummy))
      || (modifier == NARROW
	  && !supportable_narrowing_operation (code, vectype_out, vectype_in,
					       &code1, &dummy_int, &dummy)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
	fprintf (vect_dump, "conversion not supported by target.");
      return false;
    }
  if (modifier != NONE)
    {
      /* FORNOW: SLP not supported.  */
      if (STMT_SLP_TYPE (stmt_info))
	return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
      return true;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform conversion.");

  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  if (modifier == NONE && !slp_node)
    vec_oprnds0 = VEC_alloc (tree, heap, 1);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  builtin_decl =
	    targetm.vectorize.builtin_conversion (code,
						  vectype_out, vectype_in);
	  FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_call (builtin_decl, 1, vop0);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
				new_stmt);
	    }

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
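      /* For illustration (worked example added here, not in the original):
	 with VF = 8 and nunits = 4 the loop below runs ncopies = 8/4 = 2
	 times, and each iteration emits the two halves of one widened
	 result, so four vector stmts are generated in total.  */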
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
	  else
	    vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);

	  /* Generate first half of the widened result:  */
	  new_stmt
	    = vect_gen_widened_results_half (code1, decl1,
					     vec_oprnd0, vec_oprnd1,
					     unary_op, vec_dest, gsi, stmt);
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);

	  /* Generate second half of the widened result:  */
	  new_stmt
	    = vect_gen_widened_results_half (code2, decl2,
					     vec_oprnd0, vec_oprnd1,
					     unary_op, vec_dest, gsi, stmt);
	  STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    {
	      vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
	      vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd1);
	      vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	    }

	  /* Arguments are ready.  Create the new vector stmt.  */
	  new_stmt = gimple_build_assign_with_ops (code1, vec_dest, vec_oprnd0,
						   vec_oprnd1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }

  VEC_free (tree, heap, vec_oprnds0);

  return true;
}
1990 /* Function vectorizable_assignment.
1992 Check if STMT performs an assignment (copy) that can be vectorized.
1993 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1994 stmt to replace it, put it in VEC_STMT, and insert it at BSI.
1995 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1998 vectorizable_assignment (gimple stmt
, gimple_stmt_iterator
*gsi
,
1999 gimple
*vec_stmt
, slp_tree slp_node
)
2004 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2005 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2006 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2010 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
2011 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
2014 VEC(tree
,heap
) *vec_oprnds
= NULL
;
2016 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2017 gimple new_stmt
= NULL
;
2018 stmt_vec_info prev_stmt_info
= NULL
;
2019 enum tree_code code
;
2022 /* Multiple types in SLP are handled by creating the appropriate number of
2023 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2025 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
2028 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
2030 gcc_assert (ncopies
>= 1);
2032 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2035 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2038 /* Is vectorizable assignment? */
2039 if (!is_gimple_assign (stmt
))
2042 scalar_dest
= gimple_assign_lhs (stmt
);
2043 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
2046 code
= gimple_assign_rhs_code (stmt
);
2047 if (gimple_assign_single_p (stmt
)
2048 || code
== PAREN_EXPR
2049 || CONVERT_EXPR_CODE_P (code
))
2050 op
= gimple_assign_rhs1 (stmt
);
2054 if (!vect_is_simple_use_1 (op
, loop_vinfo
, bb_vinfo
,
2055 &def_stmt
, &def
, &dt
[0], &vectype_in
))
2057 if (vect_print_dump_info (REPORT_DETAILS
))
2058 fprintf (vect_dump
, "use not simple.");
  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
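  /* A hedged example (illustrative only): a conversion such as

         unsigned int u_2 = (unsigned int) s_1;    <-- signed int operand

     keeps both the element count and the vector size (V4SI in, V4SI out),
     so it is accepted here and emitted as a VIEW_CONVERT_EXPR copy.  A
     conversion like int -> short would change the vector size and is
     rejected by this check.  */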
2064 if (CONVERT_EXPR_CODE_P (code
)
2066 || TYPE_VECTOR_SUBPARTS (vectype_in
) != nunits
2067 || (GET_MODE_SIZE (TYPE_MODE (vectype
))
2068 != GET_MODE_SIZE (TYPE_MODE (vectype_in
)))))
2071 if (!vec_stmt
) /* transformation not required. */
2073 STMT_VINFO_TYPE (stmt_info
) = assignment_vec_info_type
;
2074 if (vect_print_dump_info (REPORT_DETAILS
))
2075 fprintf (vect_dump
, "=== vectorizable_assignment ===");
2076 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
);
2081 if (vect_print_dump_info (REPORT_DETAILS
))
2082 fprintf (vect_dump
, "transform assignment.");
2085 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
2088 for (j
= 0; j
< ncopies
; j
++)
2092 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
);
2094 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
      /* Arguments are ready.  Create the new vector stmt.  */
2097 FOR_EACH_VEC_ELT (tree
, vec_oprnds
, i
, vop
)
2099 if (CONVERT_EXPR_CODE_P (code
))
2100 vop
= build1 (VIEW_CONVERT_EXPR
, vectype
, vop
);
2101 new_stmt
= gimple_build_assign (vec_dest
, vop
);
2102 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2103 gimple_assign_set_lhs (new_stmt
, new_temp
);
2104 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2106 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt
);
2113 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2115 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2117 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2120 VEC_free (tree
, heap
, vec_oprnds
);
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
2133 vectorizable_shift (gimple stmt
, gimple_stmt_iterator
*gsi
,
2134 gimple
*vec_stmt
, slp_tree slp_node
)
2138 tree op0
, op1
= NULL
;
2139 tree vec_oprnd1
= NULL_TREE
;
2140 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2142 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2143 enum tree_code code
;
2144 enum machine_mode vec_mode
;
2148 enum machine_mode optab_op2_mode
;
2151 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
2152 gimple new_stmt
= NULL
;
2153 stmt_vec_info prev_stmt_info
;
2159 VEC (tree
, heap
) *vec_oprnds0
= NULL
, *vec_oprnds1
= NULL
;
2162 bool scalar_shift_arg
= true;
2163 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2166 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2169 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2172 /* Is STMT a vectorizable binary/unary operation? */
2173 if (!is_gimple_assign (stmt
))
2176 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
2179 code
= gimple_assign_rhs_code (stmt
);
2181 if (!(code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
2182 || code
== RROTATE_EXPR
))
2185 scalar_dest
= gimple_assign_lhs (stmt
);
2186 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2188 op0
= gimple_assign_rhs1 (stmt
);
2189 if (!vect_is_simple_use_1 (op0
, loop_vinfo
, bb_vinfo
,
2190 &def_stmt
, &def
, &dt
[0], &vectype
))
2192 if (vect_print_dump_info (REPORT_DETAILS
))
2193 fprintf (vect_dump
, "use not simple.");
2196 /* If op0 is an external or constant def use a vector type with
2197 the same size as the output vector type. */
2199 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
2201 gcc_assert (vectype
);
2204 if (vect_print_dump_info (REPORT_DETAILS
))
2206 fprintf (vect_dump
, "no vectype for scalar type ");
2207 print_generic_expr (vect_dump
, TREE_TYPE (op0
), TDF_SLIM
);
2213 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2214 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
2215 if (nunits_out
!= nunits_in
)
2218 op1
= gimple_assign_rhs2 (stmt
);
2219 if (!vect_is_simple_use (op1
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
, &dt
[1]))
2221 if (vect_print_dump_info (REPORT_DETAILS
))
2222 fprintf (vect_dump
, "use not simple.");
2227 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
2234 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
2237 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
2239 gcc_assert (ncopies
>= 1);
  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */
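  /* A hedged illustration (loop and names are hypothetical):

         a[i] = b[i] << k;        k loop-invariant   -> scalar shift argument
         a[i] = b[i] << c[i];     c[i] varies with i -> vector shift argument

     The first form can use the vector-by-scalar shift optab; the second
     needs the vector-by-vector optab selected below.  */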
2244 if (dt
[1] == vect_internal_def
&& !slp_node
)
2245 scalar_shift_arg
= false;
2246 else if (dt
[1] == vect_constant_def
2247 || dt
[1] == vect_external_def
2248 || dt
[1] == vect_internal_def
)
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
2255 VEC (gimple
, heap
) *stmts
= SLP_TREE_SCALAR_STMTS (slp_node
);
2258 FOR_EACH_VEC_ELT (gimple
, stmts
, k
, slpstmt
)
2259 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt
), op1
, 0))
2260 scalar_shift_arg
= false;
2265 if (vect_print_dump_info (REPORT_DETAILS
))
2266 fprintf (vect_dump
, "operand mode requires invariant argument.");
2270 /* Vector shifted by vector. */
2271 if (!scalar_shift_arg
)
2273 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
2274 if (vect_print_dump_info (REPORT_DETAILS
))
2275 fprintf (vect_dump
, "vector/vector shift/rotate found.");
2277 /* See if the machine has a vector shifted by scalar insn and if not
2278 then see if it has a vector shifted by vector insn. */
2281 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
2283 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
2285 if (vect_print_dump_info (REPORT_DETAILS
))
2286 fprintf (vect_dump
, "vector/scalar shift/rotate found.");
2290 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
2292 && (optab_handler (optab
, TYPE_MODE (vectype
))
2293 != CODE_FOR_nothing
))
2295 scalar_shift_arg
= false;
2297 if (vect_print_dump_info (REPORT_DETAILS
))
2298 fprintf (vect_dump
, "vector/vector shift/rotate found.");
      /* Unlike the other binary operators, shifts/rotates have
	 the rhs being int, instead of the same type as the lhs,
	 so make sure the scalar is the right type if we are
	 dealing with vectors of short/char.  */
2304 if (dt
[1] == vect_constant_def
)
2305 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
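      /* A hedged example (values are hypothetical): for a V8HI shift such as

	     x_2 = y_1 >> 3;

	 the constant 3 has type int while the vector elements are short, so
	 it is folded above to the vector's element type before being used as
	 the (possibly scalar) shift operand.  */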
2310 /* Supportable by target? */
2313 if (vect_print_dump_info (REPORT_DETAILS
))
2314 fprintf (vect_dump
, "no optab.");
2317 vec_mode
= TYPE_MODE (vectype
);
2318 icode
= (int) optab_handler (optab
, vec_mode
);
2319 if (icode
== CODE_FOR_nothing
)
2321 if (vect_print_dump_info (REPORT_DETAILS
))
2322 fprintf (vect_dump
, "op not supported by target.");
2323 /* Check only during analysis. */
2324 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
2325 || (vf
< vect_min_worthwhile_factor (code
)
2328 if (vect_print_dump_info (REPORT_DETAILS
))
2329 fprintf (vect_dump
, "proceeding using word mode.");
2332 /* Worthwhile without SIMD support? Check only during analysis. */
2333 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
2334 && vf
< vect_min_worthwhile_factor (code
)
2337 if (vect_print_dump_info (REPORT_DETAILS
))
2338 fprintf (vect_dump
, "not worthwhile without SIMD support.");
2342 if (!vec_stmt
) /* transformation not required. */
2344 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
2345 if (vect_print_dump_info (REPORT_DETAILS
))
2346 fprintf (vect_dump
, "=== vectorizable_shift ===");
2347 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
);
2353 if (vect_print_dump_info (REPORT_DETAILS
))
2354 fprintf (vect_dump
, "transform binary/unary operation.");
2357 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
  /* Allocate VECs for vector operands.  In case of SLP, vector operands are
     created in the previous stages of the recursion, so no allocation is
     needed, except for the case of shift with scalar shift argument.  In that
     case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
     be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
     In case of loop-based vectorization we allocate VECs of size 1.  We
     allocate VEC_OPRNDS1 only in case of binary operation.  */
2368 vec_oprnds0
= VEC_alloc (tree
, heap
, 1);
2369 vec_oprnds1
= VEC_alloc (tree
, heap
, 1);
2371 else if (scalar_shift_arg
)
2372 vec_oprnds1
= VEC_alloc (tree
, heap
, slp_node
->vec_stmts_size
);
2374 prev_stmt_info
= NULL
;
2375 for (j
= 0; j
< ncopies
; j
++)
2380 if (scalar_shift_arg
)
	  /* Vector shl and shr insn patterns can be defined with scalar
	     operand 2 (shift operand).  In this case, use constant or loop
	     invariant op1 directly, without extending it to vector mode
	     first.  */
2386 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
2387 if (!VECTOR_MODE_P (optab_op2_mode
))
2389 if (vect_print_dump_info (REPORT_DETAILS
))
2390 fprintf (vect_dump
, "operand 1 using scalar mode.");
2392 VEC_quick_push (tree
, vec_oprnds1
, vec_oprnd1
);
2395 /* Store vec_oprnd1 for every vector stmt to be created
2396 for SLP_NODE. We check during the analysis that all
2397 the shift arguments are the same.
2398 TODO: Allow different constants for different vector
2399 stmts generated for an SLP instance. */
2400 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
2401 VEC_quick_push (tree
, vec_oprnds1
, vec_oprnd1
);
2406 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
2407 (a special case for certain kind of vector shifts); otherwise,
2408 operand 1 should be of a vector type (the usual case). */
2410 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
2413 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
2417 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
2419 /* Arguments are ready. Create the new vector stmt. */
2420 FOR_EACH_VEC_ELT (tree
, vec_oprnds0
, i
, vop0
)
2422 vop1
= VEC_index (tree
, vec_oprnds1
, i
);
2423 new_stmt
= gimple_build_assign_with_ops (code
, vec_dest
, vop0
, vop1
);
2424 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2425 gimple_assign_set_lhs (new_stmt
, new_temp
);
2426 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2428 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt
);
2435 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2437 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2438 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2441 VEC_free (tree
, heap
, vec_oprnds0
);
2442 VEC_free (tree
, heap
, vec_oprnds1
);
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
2457 vectorizable_operation (gimple stmt
, gimple_stmt_iterator
*gsi
,
2458 gimple
*vec_stmt
, slp_tree slp_node
)
2462 tree op0
, op1
= NULL_TREE
, op2
= NULL_TREE
;
2463 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2465 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2466 enum tree_code code
;
2467 enum machine_mode vec_mode
;
2474 enum vect_def_type dt
[3]
2475 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
2476 gimple new_stmt
= NULL
;
2477 stmt_vec_info prev_stmt_info
;
2483 VEC(tree
,heap
) *vec_oprnds0
= NULL
, *vec_oprnds1
= NULL
, *vec_oprnds2
= NULL
;
2484 tree vop0
, vop1
, vop2
;
2485 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2488 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2491 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2494 /* Is STMT a vectorizable binary/unary operation? */
2495 if (!is_gimple_assign (stmt
))
2498 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
2501 code
= gimple_assign_rhs_code (stmt
);
2503 /* For pointer addition, we should use the normal plus for
2504 the vector addition. */
2505 if (code
== POINTER_PLUS_EXPR
)
  /* Support only unary, binary or ternary operations.  */
2509 op_type
= TREE_CODE_LENGTH (code
);
2510 if (op_type
!= unary_op
&& op_type
!= binary_op
&& op_type
!= ternary_op
)
2512 if (vect_print_dump_info (REPORT_DETAILS
))
2513 fprintf (vect_dump
, "num. args = %d (not unary/binary/ternary op).",
2518 scalar_dest
= gimple_assign_lhs (stmt
);
2519 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2521 op0
= gimple_assign_rhs1 (stmt
);
2522 if (!vect_is_simple_use_1 (op0
, loop_vinfo
, bb_vinfo
,
2523 &def_stmt
, &def
, &dt
[0], &vectype
))
2525 if (vect_print_dump_info (REPORT_DETAILS
))
2526 fprintf (vect_dump
, "use not simple.");
2529 /* If op0 is an external or constant def use a vector type with
2530 the same size as the output vector type. */
2532 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
2534 gcc_assert (vectype
);
2537 if (vect_print_dump_info (REPORT_DETAILS
))
2539 fprintf (vect_dump
, "no vectype for scalar type ");
2540 print_generic_expr (vect_dump
, TREE_TYPE (op0
), TDF_SLIM
);
2546 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2547 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
2548 if (nunits_out
!= nunits_in
)
2551 if (op_type
== binary_op
|| op_type
== ternary_op
)
2553 op1
= gimple_assign_rhs2 (stmt
);
2554 if (!vect_is_simple_use (op1
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
,
2557 if (vect_print_dump_info (REPORT_DETAILS
))
2558 fprintf (vect_dump
, "use not simple.");
2562 if (op_type
== ternary_op
)
2564 op2
= gimple_assign_rhs3 (stmt
);
2565 if (!vect_is_simple_use (op2
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
,
2568 if (vect_print_dump_info (REPORT_DETAILS
))
2569 fprintf (vect_dump
, "use not simple.");
2575 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
2582 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
2585 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
2587 gcc_assert (ncopies
>= 1);
2589 /* Shifts are handled in vectorizable_shift (). */
2590 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
2591 || code
== RROTATE_EXPR
)
2594 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
2596 /* Supportable by target? */
2599 if (vect_print_dump_info (REPORT_DETAILS
))
2600 fprintf (vect_dump
, "no optab.");
2603 vec_mode
= TYPE_MODE (vectype
);
2604 icode
= (int) optab_handler (optab
, vec_mode
);
2605 if (icode
== CODE_FOR_nothing
)
2607 if (vect_print_dump_info (REPORT_DETAILS
))
2608 fprintf (vect_dump
, "op not supported by target.");
2609 /* Check only during analysis. */
2610 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
2611 || (vf
< vect_min_worthwhile_factor (code
)
2614 if (vect_print_dump_info (REPORT_DETAILS
))
2615 fprintf (vect_dump
, "proceeding using word mode.");
2618 /* Worthwhile without SIMD support? Check only during analysis. */
2619 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
2620 && vf
< vect_min_worthwhile_factor (code
)
2623 if (vect_print_dump_info (REPORT_DETAILS
))
2624 fprintf (vect_dump
, "not worthwhile without SIMD support.");
2628 if (!vec_stmt
) /* transformation not required. */
2630 STMT_VINFO_TYPE (stmt_info
) = op_vec_info_type
;
2631 if (vect_print_dump_info (REPORT_DETAILS
))
2632 fprintf (vect_dump
, "=== vectorizable_operation ===");
2633 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
);
2639 if (vect_print_dump_info (REPORT_DETAILS
))
2640 fprintf (vect_dump
, "transform binary/unary operation.");
2643 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
2645 /* Allocate VECs for vector operands. In case of SLP, vector operands are
2646 created in the previous stages of the recursion, so no allocation is
2647 needed, except for the case of shift with scalar shift argument. In that
2648 case we store the scalar operand in VEC_OPRNDS1 for every vector stmt to
2649 be created to vectorize the SLP group, i.e., SLP_NODE->VEC_STMTS_SIZE.
2650 In case of loop-based vectorization we allocate VECs of size 1. We
2651 allocate VEC_OPRNDS1 only in case of binary operation. */
2654 vec_oprnds0
= VEC_alloc (tree
, heap
, 1);
2655 if (op_type
== binary_op
|| op_type
== ternary_op
)
2656 vec_oprnds1
= VEC_alloc (tree
, heap
, 1);
2657 if (op_type
== ternary_op
)
2658 vec_oprnds2
= VEC_alloc (tree
, heap
, 1);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e., we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:  x = load           -               -
        S2:  z = x + 1          -               -

     step 1: vectorize stmt S1 (done in vectorizable_load.  See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
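  /* A hedged sketch (not part of the original code) of how a later stage can
     walk the chain built above to reach the def used by copy number J,
     assuming the accessors already used in this file:

	 gimple vec_def = STMT_VINFO_VEC_STMT (vinfo_for_stmt (scalar_def));
	 for (k = 0; k < j; k++)
	   vec_def = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (vec_def));

     'scalar_def' here stands for the scalar stmt that defines the operand;
     the loop is illustrative only.  */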
2714 prev_stmt_info
= NULL
;
2715 for (j
= 0; j
< ncopies
; j
++)
2720 if (op_type
== binary_op
|| op_type
== ternary_op
)
2721 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
2724 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
2726 if (op_type
== ternary_op
)
2728 vec_oprnds2
= VEC_alloc (tree
, heap
, 1);
2729 VEC_quick_push (tree
, vec_oprnds2
,
2730 vect_get_vec_def_for_operand (op2
, stmt
, NULL
));
2735 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
2736 if (op_type
== ternary_op
)
2738 tree vec_oprnd
= VEC_pop (tree
, vec_oprnds2
);
2739 VEC_quick_push (tree
, vec_oprnds2
,
2740 vect_get_vec_def_for_stmt_copy (dt
[2],
2745 /* Arguments are ready. Create the new vector stmt. */
2746 FOR_EACH_VEC_ELT (tree
, vec_oprnds0
, i
, vop0
)
2748 vop1
= ((op_type
== binary_op
|| op_type
== ternary_op
)
2749 ? VEC_index (tree
, vec_oprnds1
, i
) : NULL_TREE
);
2750 vop2
= ((op_type
== ternary_op
)
2751 ? VEC_index (tree
, vec_oprnds2
, i
) : NULL_TREE
);
2752 new_stmt
= gimple_build_assign_with_ops3 (code
, vec_dest
,
2754 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2755 gimple_assign_set_lhs (new_stmt
, new_temp
);
2756 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2758 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt
);
2765 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2767 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2768 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2771 VEC_free (tree
, heap
, vec_oprnds0
);
2773 VEC_free (tree
, heap
, vec_oprnds1
);
2775 VEC_free (tree
, heap
, vec_oprnds2
);
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */
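/* A hedged illustration (hypothetical values): when called with
   MULTI_STEP_CVT == 1, this routine pushes four defs into VEC_OPRNDS,
   roughly

       vec_oprnds = { def(op), copy1(def), copy2(copy1), copy3(copy2) }

   i.e. the first def comes from vect_get_vec_def_for_operand (when *OPRND is
   still the scalar operand) and each following one is a
   vect_get_vec_def_for_stmt_copy of its predecessor.  */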
2789 vect_get_loop_based_defs (tree
*oprnd
, gimple stmt
, enum vect_def_type dt
,
2790 VEC (tree
, heap
) **vec_oprnds
, int multi_step_cvt
)
  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
2797 if (TREE_CODE (TREE_TYPE (*oprnd
)) != VECTOR_TYPE
)
2798 vec_oprnd
= vect_get_vec_def_for_operand (*oprnd
, stmt
, NULL
);
2800 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, *oprnd
);
2802 VEC_quick_push (tree
, *vec_oprnds
, vec_oprnd
);
2804 /* Get second vector operand. */
2805 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
2806 VEC_quick_push (tree
, *vec_oprnds
, vec_oprnd
);
  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */
2822 vect_create_vectorized_demotion_stmts (VEC (tree
, heap
) **vec_oprnds
,
2823 int multi_step_cvt
, gimple stmt
,
2824 VEC (tree
, heap
) *vec_dsts
,
2825 gimple_stmt_iterator
*gsi
,
2826 slp_tree slp_node
, enum tree_code code
,
2827 stmt_vec_info
*prev_stmt_info
)
2830 tree vop0
, vop1
, new_tmp
, vec_dest
;
2832 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2834 vec_dest
= VEC_pop (tree
, vec_dsts
);
2836 for (i
= 0; i
< VEC_length (tree
, *vec_oprnds
); i
+= 2)
2838 /* Create demotion operation. */
2839 vop0
= VEC_index (tree
, *vec_oprnds
, i
);
2840 vop1
= VEC_index (tree
, *vec_oprnds
, i
+ 1);
2841 new_stmt
= gimple_build_assign_with_ops (code
, vec_dest
, vop0
, vop1
);
2842 new_tmp
= make_ssa_name (vec_dest
, new_stmt
);
2843 gimple_assign_set_lhs (new_stmt
, new_tmp
);
2844 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2847 /* Store the resulting vector for next recursive call. */
2848 VEC_replace (tree
, *vec_oprnds
, i
/2, new_tmp
);
2851 /* This is the last step of the conversion sequence. Store the
2852 vectors in SLP_NODE or in vector info of the scalar statement
2853 (or in STMT_VINFO_RELATED_STMT chain). */
2855 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt
);
2858 if (!*prev_stmt_info
)
2859 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
2861 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt
;
2863 *prev_stmt_info
= vinfo_for_stmt (new_stmt
);
      /* For multi-step demotion operations we first generate demotion
	 operations from the source type to the intermediate types, and
	 then combine the results (stored in VEC_OPRNDS) in demotion
	 operation to the destination type.  */
      /* At each level of recursion we have half of the operands we had at
	 the previous level.  */
      VEC_truncate (tree, *vec_oprnds, (i+1)/2);
2877 vect_create_vectorized_demotion_stmts (vec_oprnds
, multi_step_cvt
- 1,
2878 stmt
, vec_dsts
, gsi
, slp_node
,
2879 code
, prev_stmt_info
);
/* Function vectorizable_type_demotion

   Check if STMT performs a binary or unary operation that involves
   type demotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
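/* A hedged example (types are hypothetical): a typical demotion handled
   here is

       short s_2 = (short) i_1;       <-- int -> short

   With V4SI inputs and a V8HI output, every pair of input vectors is
   narrowed into a single output vector, which is why the operands are
   consumed two at a time in vect_create_vectorized_demotion_stmts.  */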
2893 vectorizable_type_demotion (gimple stmt
, gimple_stmt_iterator
*gsi
,
2894 gimple
*vec_stmt
, slp_tree slp_node
)
2899 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2900 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2901 enum tree_code code
, code1
= ERROR_MARK
;
2904 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
2905 stmt_vec_info prev_stmt_info
;
2912 int multi_step_cvt
= 0;
2913 VEC (tree
, heap
) *vec_oprnds0
= NULL
;
2914 VEC (tree
, heap
) *vec_dsts
= NULL
, *interm_types
= NULL
, *tmp_vec_dsts
= NULL
;
2915 tree last_oprnd
, intermediate_type
;
2917 /* FORNOW: not supported by basic block SLP vectorization. */
2918 gcc_assert (loop_vinfo
);
2920 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
2923 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
2926 /* Is STMT a vectorizable type-demotion operation? */
2927 if (!is_gimple_assign (stmt
))
2930 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
2933 code
= gimple_assign_rhs_code (stmt
);
2934 if (!CONVERT_EXPR_CODE_P (code
))
2937 scalar_dest
= gimple_assign_lhs (stmt
);
2938 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2940 /* Check the operands of the operation. */
2941 op0
= gimple_assign_rhs1 (stmt
);
2942 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
2943 && INTEGRAL_TYPE_P (TREE_TYPE (op0
)))
2944 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest
))
2945 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0
))
2946 && CONVERT_EXPR_CODE_P (code
))))
2948 if (!vect_is_simple_use_1 (op0
, loop_vinfo
, NULL
,
2949 &def_stmt
, &def
, &dt
[0], &vectype_in
))
2951 if (vect_print_dump_info (REPORT_DETAILS
))
2952 fprintf (vect_dump
, "use not simple.");
2955 /* If op0 is an external def use a vector type with the
2956 same size as the output vector type if possible. */
2958 vectype_in
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
2960 gcc_assert (vectype_in
);
2963 if (vect_print_dump_info (REPORT_DETAILS
))
2965 fprintf (vect_dump
, "no vectype for scalar type ");
2966 print_generic_expr (vect_dump
, TREE_TYPE (op0
), TDF_SLIM
);
2972 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
2973 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2974 if (nunits_in
>= nunits_out
)
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
2980 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
2983 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_out
;
2984 gcc_assert (ncopies
>= 1);
2986 /* Supportable by target? */
2987 if (!supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
2988 &code1
, &multi_step_cvt
, &interm_types
))
2991 if (!vec_stmt
) /* transformation not required. */
2993 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
2994 if (vect_print_dump_info (REPORT_DETAILS
))
2995 fprintf (vect_dump
, "=== vectorizable_demotion ===");
2996 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
);
3001 if (vect_print_dump_info (REPORT_DETAILS
))
3002 fprintf (vect_dump
, "transform type demotion operation. ncopies = %d.",
  /* In case of multi-step demotion, we first generate demotion operations to
     the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_narrowing_operation, and store them in the correct order
     for future use in vect_create_vectorized_demotion_stmts().  */
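  /* A hedged illustration (hypothetical types): demoting int to char may go
     through an intermediate short type, e.g.

	 V4SI + V4SI  ->  V8HI          (first step, intermediate type)
	 V8HI + V8HI  ->  V16QI         (second step, final type)

     The destination for the final type is pushed into VEC_DSTS first, so the
     recursion in vect_create_vectorized_demotion_stmts pops the intermediate
     destinations before it and reaches the final one at the last step.  */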
3011 vec_dsts
= VEC_alloc (tree
, heap
, multi_step_cvt
+ 1);
3013 vec_dsts
= VEC_alloc (tree
, heap
, 1);
3015 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
3016 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
3020 for (i
= VEC_length (tree
, interm_types
) - 1;
3021 VEC_iterate (tree
, interm_types
, i
, intermediate_type
); i
--)
3023 vec_dest
= vect_create_destination_var (scalar_dest
,
3025 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
3029 /* In case the vectorization factor (VF) is bigger than the number
3030 of elements that we can fit in a vectype (nunits), we have to generate
3031 more than one vector stmt - i.e - we need to "unroll" the
3032 vector stmt by a factor VF/nunits. */
3034 prev_stmt_info
= NULL
;
3035 for (j
= 0; j
< ncopies
; j
++)
3039 vect_get_slp_defs (op0
, NULL_TREE
, slp_node
, &vec_oprnds0
, NULL
, -1);
3042 VEC_free (tree
, heap
, vec_oprnds0
);
3043 vec_oprnds0
= VEC_alloc (tree
, heap
,
3044 (multi_step_cvt
? vect_pow2 (multi_step_cvt
) * 2 : 2));
3045 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
3046 vect_pow2 (multi_step_cvt
) - 1);
3049 /* Arguments are ready. Create the new vector stmts. */
3050 tmp_vec_dsts
= VEC_copy (tree
, heap
, vec_dsts
);
3051 vect_create_vectorized_demotion_stmts (&vec_oprnds0
,
3052 multi_step_cvt
, stmt
, tmp_vec_dsts
,
3053 gsi
, slp_node
, code1
,
3057 VEC_free (tree
, heap
, vec_oprnds0
);
3058 VEC_free (tree
, heap
, vec_dsts
);
3059 VEC_free (tree
, heap
, tmp_vec_dsts
);
3060 VEC_free (tree
, heap
, interm_types
);
3062 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3067 /* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
3068 and VEC_OPRNDS1 (for binary operations). For multi-step conversions store
3069 the resulting vectors and call the function recursively. */
3072 vect_create_vectorized_promotion_stmts (VEC (tree
, heap
) **vec_oprnds0
,
3073 VEC (tree
, heap
) **vec_oprnds1
,
3074 int multi_step_cvt
, gimple stmt
,
3075 VEC (tree
, heap
) *vec_dsts
,
3076 gimple_stmt_iterator
*gsi
,
3077 slp_tree slp_node
, enum tree_code code1
,
3078 enum tree_code code2
, tree decl1
,
3079 tree decl2
, int op_type
,
3080 stmt_vec_info
*prev_stmt_info
)
3083 tree vop0
, vop1
, new_tmp1
, new_tmp2
, vec_dest
;
3084 gimple new_stmt1
, new_stmt2
;
3085 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3086 VEC (tree
, heap
) *vec_tmp
;
3088 vec_dest
= VEC_pop (tree
, vec_dsts
);
3089 vec_tmp
= VEC_alloc (tree
, heap
, VEC_length (tree
, *vec_oprnds0
) * 2);
3091 FOR_EACH_VEC_ELT (tree
, *vec_oprnds0
, i
, vop0
)
3093 if (op_type
== binary_op
)
3094 vop1
= VEC_index (tree
, *vec_oprnds1
, i
);
3098 /* Generate the two halves of promotion operation. */
3099 new_stmt1
= vect_gen_widened_results_half (code1
, decl1
, vop0
, vop1
,
3100 op_type
, vec_dest
, gsi
, stmt
);
3101 new_stmt2
= vect_gen_widened_results_half (code2
, decl2
, vop0
, vop1
,
3102 op_type
, vec_dest
, gsi
, stmt
);
3103 if (is_gimple_call (new_stmt1
))
3105 new_tmp1
= gimple_call_lhs (new_stmt1
);
3106 new_tmp2
= gimple_call_lhs (new_stmt2
);
3110 new_tmp1
= gimple_assign_lhs (new_stmt1
);
3111 new_tmp2
= gimple_assign_lhs (new_stmt2
);
3116 /* Store the results for the recursive call. */
3117 VEC_quick_push (tree
, vec_tmp
, new_tmp1
);
3118 VEC_quick_push (tree
, vec_tmp
, new_tmp2
);
      /* Last step of the promotion sequence - store the results.  */
3125 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt1
);
3126 VEC_quick_push (gimple
, SLP_TREE_VEC_STMTS (slp_node
), new_stmt2
);
3130 if (!*prev_stmt_info
)
3131 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt1
;
3133 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt1
;
3135 *prev_stmt_info
= vinfo_for_stmt (new_stmt1
);
3136 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt2
;
3137 *prev_stmt_info
= vinfo_for_stmt (new_stmt2
);
      /* For a multi-step promotion operation we call the
	 function recursively for every stage.  We start from the input type,
	 create promotion operations to the intermediate types, and then
	 create promotions to the output type.  */
3148 *vec_oprnds0
= VEC_copy (tree
, heap
, vec_tmp
);
3149 vect_create_vectorized_promotion_stmts (vec_oprnds0
, vec_oprnds1
,
3150 multi_step_cvt
- 1, stmt
,
3151 vec_dsts
, gsi
, slp_node
, code1
,
3152 code2
, decl2
, decl2
, op_type
,
3156 VEC_free (tree
, heap
, vec_tmp
);
/* Function vectorizable_type_promotion

   Check if STMT performs a binary or unary operation that involves
   type promotion, and if it can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
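/* A hedged example (types are hypothetical): a typical promotion handled
   here is

       int i_2 = (int) s_1;           <-- short -> int

   With a V8HI input and V4SI outputs, each input vector produces two
   output vectors, one from its low half and one from its high half, which
   is why vect_gen_widened_results_half is called twice per operand.  */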
3169 vectorizable_type_promotion (gimple stmt
, gimple_stmt_iterator
*gsi
,
3170 gimple
*vec_stmt
, slp_tree slp_node
)
3174 tree op0
, op1
= NULL
;
3175 tree vec_oprnd0
=NULL
, vec_oprnd1
=NULL
;
3176 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3177 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3178 enum tree_code code
, code1
= ERROR_MARK
, code2
= ERROR_MARK
;
3179 tree decl1
= NULL_TREE
, decl2
= NULL_TREE
;
3183 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
3184 stmt_vec_info prev_stmt_info
;
3191 tree intermediate_type
= NULL_TREE
;
3192 int multi_step_cvt
= 0;
3193 VEC (tree
, heap
) *vec_oprnds0
= NULL
, *vec_oprnds1
= NULL
;
3194 VEC (tree
, heap
) *vec_dsts
= NULL
, *interm_types
= NULL
, *tmp_vec_dsts
= NULL
;
3196 /* FORNOW: not supported by basic block SLP vectorization. */
3197 gcc_assert (loop_vinfo
);
3199 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
3202 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
3205 /* Is STMT a vectorizable type-promotion operation? */
3206 if (!is_gimple_assign (stmt
))
3209 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
3212 code
= gimple_assign_rhs_code (stmt
);
3213 if (!CONVERT_EXPR_CODE_P (code
)
3214 && code
!= WIDEN_MULT_EXPR
)
3217 scalar_dest
= gimple_assign_lhs (stmt
);
3218 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
3220 /* Check the operands of the operation. */
3221 op0
= gimple_assign_rhs1 (stmt
);
3222 if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
3223 && INTEGRAL_TYPE_P (TREE_TYPE (op0
)))
3224 || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest
))
3225 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0
))
3226 && CONVERT_EXPR_CODE_P (code
))))
3228 if (!vect_is_simple_use_1 (op0
, loop_vinfo
, NULL
,
3229 &def_stmt
, &def
, &dt
[0], &vectype_in
))
3231 if (vect_print_dump_info (REPORT_DETAILS
))
3232 fprintf (vect_dump
, "use not simple.");
3235 /* If op0 is an external or constant def use a vector type with
3236 the same size as the output vector type. */
3238 vectype_in
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
3240 gcc_assert (vectype_in
);
3243 if (vect_print_dump_info (REPORT_DETAILS
))
3245 fprintf (vect_dump
, "no vectype for scalar type ");
3246 print_generic_expr (vect_dump
, TREE_TYPE (op0
), TDF_SLIM
);
3252 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
3253 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
3254 if (nunits_in
<= nunits_out
)
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
3260 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
3263 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
3265 gcc_assert (ncopies
>= 1);
3267 op_type
= TREE_CODE_LENGTH (code
);
3268 if (op_type
== binary_op
)
3270 op1
= gimple_assign_rhs2 (stmt
);
3271 if (!vect_is_simple_use (op1
, loop_vinfo
, NULL
, &def_stmt
, &def
, &dt
[1]))
3273 if (vect_print_dump_info (REPORT_DETAILS
))
3274 fprintf (vect_dump
, "use not simple.");
3279 /* Supportable by target? */
3280 if (!supportable_widening_operation (code
, stmt
, vectype_out
, vectype_in
,
3281 &decl1
, &decl2
, &code1
, &code2
,
3282 &multi_step_cvt
, &interm_types
))
  /* Binary widening operation can only be supported directly by the
     architecture.  */
  gcc_assert (!(multi_step_cvt && op_type == binary_op));
3289 if (!vec_stmt
) /* transformation not required. */
3291 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
3292 if (vect_print_dump_info (REPORT_DETAILS
))
3293 fprintf (vect_dump
, "=== vectorizable_promotion ===");
3294 vect_model_simple_cost (stmt_info
, 2*ncopies
, dt
, NULL
);
3300 if (vect_print_dump_info (REPORT_DETAILS
))
3301 fprintf (vect_dump
, "transform type promotion operation. ncopies = %d.",
  /* In case of multi-step promotion, we first generate promotion operations
     to the intermediate types, and then from those types to the final one.
     We store vector destinations in VEC_DSTS in the correct order for
     recursive creation of promotion operations in
     vect_create_vectorized_promotion_stmts().  Vector destinations are
     created according to TYPES received from
     supportable_widening_operation().  */
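  /* A hedged illustration (hypothetical types): promoting char to int may go
     through an intermediate short type, e.g.

	 V16QI  ->  V8HI + V8HI         (first step, intermediate type)
	 V8HI   ->  V4SI + V4SI         (second step, final type)

     As for demotion, the destination for the final type is pushed first and
     is therefore the one used by the deepest level of the recursion.  */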
3312 vec_dsts
= VEC_alloc (tree
, heap
, multi_step_cvt
+ 1);
3314 vec_dsts
= VEC_alloc (tree
, heap
, 1);
3316 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
3317 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
3321 for (i
= VEC_length (tree
, interm_types
) - 1;
3322 VEC_iterate (tree
, interm_types
, i
, intermediate_type
); i
--)
3324 vec_dest
= vect_create_destination_var (scalar_dest
,
3326 VEC_quick_push (tree
, vec_dsts
, vec_dest
);
3332 vec_oprnds0
= VEC_alloc (tree
, heap
,
3333 (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
3334 if (op_type
== binary_op
)
3335 vec_oprnds1
= VEC_alloc (tree
, heap
, 1);
3338 /* In case the vectorization factor (VF) is bigger than the number
3339 of elements that we can fit in a vectype (nunits), we have to generate
3340 more than one vector stmt - i.e - we need to "unroll" the
3341 vector stmt by a factor VF/nunits. */
3343 prev_stmt_info
= NULL
;
3344 for (j
= 0; j
< ncopies
; j
++)
3350 vect_get_slp_defs (op0
, op1
, slp_node
, &vec_oprnds0
,
3354 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
, NULL
);
3355 VEC_quick_push (tree
, vec_oprnds0
, vec_oprnd0
);
3356 if (op_type
== binary_op
)
3358 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
, NULL
);
3359 VEC_quick_push (tree
, vec_oprnds1
, vec_oprnd1
);
3365 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
3366 VEC_replace (tree
, vec_oprnds0
, 0, vec_oprnd0
);
3367 if (op_type
== binary_op
)
3369 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1], vec_oprnd1
);
3370 VEC_replace (tree
, vec_oprnds1
, 0, vec_oprnd1
);
3374 /* Arguments are ready. Create the new vector stmts. */
3375 tmp_vec_dsts
= VEC_copy (tree
, heap
, vec_dsts
);
3376 vect_create_vectorized_promotion_stmts (&vec_oprnds0
, &vec_oprnds1
,
3377 multi_step_cvt
, stmt
,
3379 gsi
, slp_node
, code1
, code2
,
3380 decl1
, decl2
, op_type
,
3384 VEC_free (tree
, heap
, vec_dsts
);
3385 VEC_free (tree
, heap
, tmp_vec_dsts
);
3386 VEC_free (tree
, heap
, interm_types
);
3387 VEC_free (tree
, heap
, vec_oprnds0
);
3388 VEC_free (tree
, heap
, vec_oprnds1
);
3390 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
/* Function vectorizable_store.

   Check if STMT defines a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
3404 vectorizable_store (gimple stmt
, gimple_stmt_iterator
*gsi
, gimple
*vec_stmt
,
3410 tree vec_oprnd
= NULL_TREE
;
3411 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3412 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
3413 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3415 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3416 struct loop
*loop
= NULL
;
3417 enum machine_mode vec_mode
;
3419 enum dr_alignment_support alignment_support_scheme
;
3422 enum vect_def_type dt
;
3423 stmt_vec_info prev_stmt_info
= NULL
;
3424 tree dataref_ptr
= NULL_TREE
;
3425 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
3428 gimple next_stmt
, first_stmt
= NULL
;
3429 bool strided_store
= false;
3430 bool store_lanes_p
= false;
3431 unsigned int group_size
, i
;
3432 VEC(tree
,heap
) *dr_chain
= NULL
, *oprnds
= NULL
, *result_chain
= NULL
;
3434 VEC(tree
,heap
) *vec_oprnds
= NULL
;
3435 bool slp
= (slp_node
!= NULL
);
3436 unsigned int vec_num
;
3437 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3441 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
3446 if (slp
|| PURE_SLP_STMT (stmt_info
))
3449 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
3451 gcc_assert (ncopies
>= 1);
3453 /* FORNOW. This restriction should be relaxed. */
3454 if (loop
&& nested_in_vect_loop_p (loop
, stmt
) && ncopies
> 1)
3456 if (vect_print_dump_info (REPORT_DETAILS
))
3457 fprintf (vect_dump
, "multiple types in nested loop.");
3461 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3464 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
3467 /* Is vectorizable store? */
3469 if (!is_gimple_assign (stmt
))
3472 scalar_dest
= gimple_assign_lhs (stmt
);
3473 if (TREE_CODE (scalar_dest
) != ARRAY_REF
3474 && TREE_CODE (scalar_dest
) != INDIRECT_REF
3475 && TREE_CODE (scalar_dest
) != COMPONENT_REF
3476 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
3477 && TREE_CODE (scalar_dest
) != REALPART_EXPR
3478 && TREE_CODE (scalar_dest
) != MEM_REF
)
3481 gcc_assert (gimple_assign_single_p (stmt
));
3482 op
= gimple_assign_rhs1 (stmt
);
3483 if (!vect_is_simple_use (op
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
, &dt
))
3485 if (vect_print_dump_info (REPORT_DETAILS
))
3486 fprintf (vect_dump
, "use not simple.");
3490 /* The scalar rhs type needs to be trivially convertible to the vector
3491 component type. This should always be the case. */
3492 elem_type
= TREE_TYPE (vectype
);
3493 if (!useless_type_conversion_p (elem_type
, TREE_TYPE (op
)))
3495 if (vect_print_dump_info (REPORT_DETAILS
))
3496 fprintf (vect_dump
, "??? operands of different types");
3500 vec_mode
= TYPE_MODE (vectype
);
3501 /* FORNOW. In some cases can vectorize even if data-type not supported
3502 (e.g. - array initialization with 0). */
3503 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
3506 if (!STMT_VINFO_DATA_REF (stmt_info
))
3509 if (tree_int_cst_compare (DR_STEP (dr
), size_zero_node
) < 0)
3511 if (vect_print_dump_info (REPORT_DETAILS
))
3512 fprintf (vect_dump
, "negative step for store.");
3516 if (STMT_VINFO_STRIDED_ACCESS (stmt_info
))
3518 strided_store
= true;
3519 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
3520 if (!slp
&& !PURE_SLP_STMT (stmt_info
))
3522 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
3523 if (vect_store_lanes_supported (vectype
, group_size
))
3524 store_lanes_p
= true;
3525 else if (!vect_strided_store_supported (vectype
, group_size
))
3529 if (first_stmt
== stmt
)
3531 /* STMT is the leader of the group. Check the operands of all the
3532 stmts of the group. */
3533 next_stmt
= GROUP_NEXT_ELEMENT (stmt_info
);
3536 gcc_assert (gimple_assign_single_p (next_stmt
));
3537 op
= gimple_assign_rhs1 (next_stmt
);
3538 if (!vect_is_simple_use (op
, loop_vinfo
, bb_vinfo
, &def_stmt
,
3541 if (vect_print_dump_info (REPORT_DETAILS
))
3542 fprintf (vect_dump
, "use not simple.");
3545 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
3550 if (!vec_stmt
) /* transformation not required. */
3552 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
3553 vect_model_store_cost (stmt_info
, ncopies
, store_lanes_p
, dt
, NULL
);
3561 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
3562 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
3564 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))++;
3567 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
3569 /* We vectorize all the stmts of the interleaving group when we
3570 reach the last stmt in the group. */
3571 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
3572 < GROUP_SIZE (vinfo_for_stmt (first_stmt
))
3581 strided_store
= false;
      /* VEC_NUM is the number of vect stmts to be created for this
	 group.  */
3584 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
3585 first_stmt
= VEC_index (gimple
, SLP_TREE_SCALAR_STMTS (slp_node
), 0);
3586 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
      /* VEC_NUM is the number of vect stmts to be created for this
	 group.  */
3591 vec_num
= group_size
;
3597 group_size
= vec_num
= 1;
3600 if (vect_print_dump_info (REPORT_DETAILS
))
3601 fprintf (vect_dump
, "transform store. ncopies = %d",ncopies
);
3603 dr_chain
= VEC_alloc (tree
, heap
, group_size
);
3604 oprnds
= VEC_alloc (tree
, heap
, group_size
);
3606 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
3607 gcc_assert (alignment_support_scheme
);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);
3615 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
3617 aggr_type
= vectype
;
3619 /* In case the vectorization factor (VF) is bigger than the number
3620 of elements that we can fit in a vectype (nunits), we have to generate
3621 more than one vector stmt - i.e - we need to "unroll" the
3622 vector stmt by a factor VF/nunits. For more details see documentation in
3623 vect_get_vec_def_for_copy_stmt. */
3625 /* In case of interleaving (non-unit strided access):
3632 We create vectorized stores starting from base address (the access of the
3633 first stmt in the chain (S2 in the above example), when the last store stmt
3634 of the chain (S4) is reached:
3637 VS2: &base + vec_size*1 = vx0
3638 VS3: &base + vec_size*2 = vx1
3639 VS4: &base + vec_size*3 = vx3
3641 Then permutation statements are generated:
3643 VS5: vx5 = VEC_INTERLEAVE_HIGH_EXPR < vx0, vx3 >
3644 VS6: vx6 = VEC_INTERLEAVE_LOW_EXPR < vx0, vx3 >
3647 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
3648 (the order of the data-refs in the output of vect_permute_store_chain
3649 corresponds to the order of scalar stmts in the interleaving chain - see
3650 the documentation of vect_permute_store_chain()).
3652 In case of both multiple types and interleaving, above vector stores and
3653 permutation stmts are created for every copy. The result vector stmts are
3654 put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
3655 STMT_VINFO_RELATED_STMT for the next copies.
3658 prev_stmt_info
= NULL
;
3659 for (j
= 0; j
< ncopies
; j
++)
3668 /* Get vectorized arguments for SLP_NODE. */
3669 vect_get_slp_defs (NULL_TREE
, NULL_TREE
, slp_node
, &vec_oprnds
,
3672 vec_oprnd
= VEC_index (tree
, vec_oprnds
, 0);
3676 /* For interleaved stores we collect vectorized defs for all the
3677 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
3678 used as an input to vect_permute_store_chain(), and OPRNDS as
3679 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
3681 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3682 OPRNDS are of size 1. */
3683 next_stmt
= first_stmt
;
3684 for (i
= 0; i
< group_size
; i
++)
3686 /* Since gaps are not supported for interleaved stores,
3687 GROUP_SIZE is the exact number of stmts in the chain.
3688 Therefore, NEXT_STMT can't be NULL_TREE. In case that
3689 there is no interleaving, GROUP_SIZE is 1, and only one
3690 iteration of the loop will be executed. */
3691 gcc_assert (next_stmt
3692 && gimple_assign_single_p (next_stmt
));
3693 op
= gimple_assign_rhs1 (next_stmt
);
3695 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
,
3697 VEC_quick_push(tree
, dr_chain
, vec_oprnd
);
3698 VEC_quick_push(tree
, oprnds
, vec_oprnd
);
3699 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
	  /* We should have caught mismatched types earlier.  */
3704 gcc_assert (useless_type_conversion_p (vectype
,
3705 TREE_TYPE (vec_oprnd
)));
3706 dataref_ptr
= vect_create_data_ref_ptr (first_stmt
, aggr_type
, NULL
,
3707 NULL_TREE
, &dummy
, gsi
,
3708 &ptr_incr
, false, &inv_p
);
3709 gcc_assert (bb_vinfo
|| !inv_p
);
3713 /* For interleaved stores we created vectorized defs for all the
3714 defs stored in OPRNDS in the previous iteration (previous copy).
3715 DR_CHAIN is then used as an input to vect_permute_store_chain(),
3716 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
3718 If the store is not strided, GROUP_SIZE is 1, and DR_CHAIN and
3719 OPRNDS are of size 1. */
3720 for (i
= 0; i
< group_size
; i
++)
3722 op
= VEC_index (tree
, oprnds
, i
);
3723 vect_is_simple_use (op
, loop_vinfo
, bb_vinfo
, &def_stmt
, &def
,
3725 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, op
);
3726 VEC_replace(tree
, dr_chain
, i
, vec_oprnd
);
3727 VEC_replace(tree
, oprnds
, i
, vec_oprnd
);
3729 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
3730 TYPE_SIZE_UNIT (aggr_type
));
3737 /* Combine all the vectors into an array. */
3738 vec_array
= create_vector_array (vectype
, vec_num
);
3739 for (i
= 0; i
< vec_num
; i
++)
3741 vec_oprnd
= VEC_index (tree
, dr_chain
, i
);
3742 write_vector_array (stmt
, gsi
, vec_oprnd
, vec_array
, i
);
3746 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
3747 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, first_dr
);
3748 new_stmt
= gimple_build_call_internal (IFN_STORE_LANES
, 1, vec_array
);
3749 gimple_call_set_lhs (new_stmt
, data_ref
);
3750 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3751 mark_symbols_for_renaming (new_stmt
);
3758 result_chain
= VEC_alloc (tree
, heap
, group_size
);
3760 vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
3764 next_stmt
= first_stmt
;
3765 for (i
= 0; i
< vec_num
; i
++)
3767 struct ptr_info_def
*pi
;
3770 /* Bump the vector pointer. */
3771 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
3775 vec_oprnd
= VEC_index (tree
, vec_oprnds
, i
);
3776 else if (strided_store
)
3777 /* For strided stores vectorized defs are interleaved in
3778 vect_permute_store_chain(). */
3779 vec_oprnd
= VEC_index (tree
, result_chain
, i
);
3781 data_ref
= build2 (MEM_REF
, TREE_TYPE (vec_oprnd
), dataref_ptr
,
3782 build_int_cst (reference_alias_ptr_type
3783 (DR_REF (first_dr
)), 0));
3784 pi
= get_ptr_info (dataref_ptr
);
3785 pi
->align
= TYPE_ALIGN_UNIT (vectype
);
3786 if (aligned_access_p (first_dr
))
3788 else if (DR_MISALIGNMENT (first_dr
) == -1)
3790 TREE_TYPE (data_ref
)
3791 = build_aligned_type (TREE_TYPE (data_ref
),
3792 TYPE_ALIGN (elem_type
));
3793 pi
->align
= TYPE_ALIGN_UNIT (elem_type
);
3798 TREE_TYPE (data_ref
)
3799 = build_aligned_type (TREE_TYPE (data_ref
),
3800 TYPE_ALIGN (elem_type
));
3801 pi
->misalign
= DR_MISALIGNMENT (first_dr
);
3804 /* Arguments are ready. Create the new vector stmt. */
3805 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
3806 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3807 mark_symbols_for_renaming (new_stmt
);
3812 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
3820 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3822 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3823 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3827 VEC_free (tree
, heap
, dr_chain
);
3828 VEC_free (tree
, heap
, oprnds
);
3830 VEC_free (tree
, heap
, result_chain
);
3832 VEC_free (tree
, heap
, vec_oprnds
);
/* Given a vector type VECTYPE, returns a builtin DECL to be used
   for vector permutation and stores a mask into *MASK that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL (and *MASK is unchanged).  */
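/* A hedged example (the concrete mask representation is target-dependent):
   for a four-element vector the reversal mask built below selects the
   elements in the order

       { 3, 2, 1, 0 }

   so that builtin_vec_perm (x, x, mask) yields the elements of X in
   reverse.  */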
3843 perm_mask_for_reverse (tree vectype
, tree
*mask
)
3846 tree mask_element_type
, mask_type
;
3847 tree mask_vec
= NULL
;
3850 if (!targetm
.vectorize
.builtin_vec_perm
)
3853 builtin_decl
= targetm
.vectorize
.builtin_vec_perm (vectype
,
3854 &mask_element_type
);
3855 if (!builtin_decl
|| !mask_element_type
)
3858 mask_type
= get_vectype_for_scalar_type (mask_element_type
);
3859 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
3861 || TYPE_VECTOR_SUBPARTS (vectype
) != TYPE_VECTOR_SUBPARTS (mask_type
))
3864 for (i
= 0; i
< nunits
; i
++)
3865 mask_vec
= tree_cons (NULL
, build_int_cst (mask_element_type
, i
), mask_vec
);
3866 mask_vec
= build_vector (mask_type
, mask_vec
);
3868 if (!targetm
.vectorize
.builtin_vec_perm_ok (vectype
, mask_vec
))
3872 return builtin_decl
;
/* Given a vector variable X that was generated for the scalar LHS of
   STMT, generate instructions to reverse the vector elements of X,
   insert them at *GSI and return the permuted vector variable.  */
3880 reverse_vec_elements (tree x
, gimple stmt
, gimple_stmt_iterator
*gsi
)
3882 tree vectype
= TREE_TYPE (x
);
3883 tree mask_vec
, builtin_decl
;
3884 tree perm_dest
, data_ref
;
3887 builtin_decl
= perm_mask_for_reverse (vectype
, &mask_vec
);
3889 perm_dest
= vect_create_destination_var (gimple_assign_lhs (stmt
), vectype
);
3891 /* Generate the permute statement. */
3892 perm_stmt
= gimple_build_call (builtin_decl
, 3, x
, x
, mask_vec
);
3893 if (!useless_type_conversion_p (vectype
,
3894 TREE_TYPE (TREE_TYPE (builtin_decl
))))
3896 tree tem
= create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl
)), NULL
);
3897 tem
= make_ssa_name (tem
, perm_stmt
);
3898 gimple_call_set_lhs (perm_stmt
, tem
);
3899 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
3900 perm_stmt
= gimple_build_assign (NULL_TREE
,
3901 build1 (VIEW_CONVERT_EXPR
,
3904 data_ref
= make_ssa_name (perm_dest
, perm_stmt
);
3905 gimple_set_lhs (perm_stmt
, data_ref
);
3906 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
3920 vectorizable_load (gimple stmt
, gimple_stmt_iterator
*gsi
, gimple
*vec_stmt
,
3921 slp_tree slp_node
, slp_instance slp_node_instance
)
3924 tree vec_dest
= NULL
;
3925 tree data_ref
= NULL
;
3926 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3927 stmt_vec_info prev_stmt_info
;
3928 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3929 struct loop
*loop
= NULL
;
3930 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
3931 bool nested_in_vect_loop
= false;
3932 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
;
3933 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3936 enum machine_mode mode
;
3937 gimple new_stmt
= NULL
;
3939 enum dr_alignment_support alignment_support_scheme
;
3940 tree dataref_ptr
= NULL_TREE
;
3942 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
3944 int i
, j
, group_size
;
3945 tree msq
= NULL_TREE
, lsq
;
3946 tree offset
= NULL_TREE
;
3947 tree realignment_token
= NULL_TREE
;
3949 VEC(tree
,heap
) *dr_chain
= NULL
;
3950 bool strided_load
= false;
3951 bool load_lanes_p
= false;
3956 bool compute_in_loop
= false;
3957 struct loop
*at_loop
;
3959 bool slp
= (slp_node
!= NULL
);
3960 bool slp_perm
= false;
3961 enum tree_code code
;
3962 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3968 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
3969 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
3970 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
3978 if (slp
|| PURE_SLP_STMT (stmt_info
))
3981 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
3983 gcc_assert (ncopies
>= 1);
3985 /* FORNOW. This restriction should be relaxed. */
3986 if (nested_in_vect_loop
&& ncopies
> 1)
3988 if (vect_print_dump_info (REPORT_DETAILS
))
3989 fprintf (vect_dump
, "multiple types in nested loop.");
3993 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3996 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
3999 /* Is vectorizable load? */
4000 if (!is_gimple_assign (stmt
))
4003 scalar_dest
= gimple_assign_lhs (stmt
);
4004 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
4007 code
= gimple_assign_rhs_code (stmt
);
4008 if (code
!= ARRAY_REF
4009 && code
!= INDIRECT_REF
4010 && code
!= COMPONENT_REF
4011 && code
!= IMAGPART_EXPR
4012 && code
!= REALPART_EXPR
4016 if (!STMT_VINFO_DATA_REF (stmt_info
))
4019 negative
= tree_int_cst_compare (DR_STEP (dr
), size_zero_node
) < 0;
4020 if (negative
&& ncopies
> 1)
4022 if (vect_print_dump_info (REPORT_DETAILS
))
4023 fprintf (vect_dump
, "multiple types with negative step.");
4027 scalar_type
= TREE_TYPE (DR_REF (dr
));
4028 mode
= TYPE_MODE (vectype
);
4030 /* FORNOW. In some cases can vectorize even if data-type not supported
4031 (e.g. - data copies). */
4032 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
4034 if (vect_print_dump_info (REPORT_DETAILS
))
4035 fprintf (vect_dump
, "Aligned load, but unsupported type.");
4039 /* The vector component type needs to be trivially convertible to the
4040 scalar lhs. This should always be the case. */
4041 elem_type
= TREE_TYPE (vectype
);
4042 if (!useless_type_conversion_p (TREE_TYPE (scalar_dest
), elem_type
))
4044 if (vect_print_dump_info (REPORT_DETAILS
))
4045 fprintf (vect_dump
, "??? operands of different types");
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
    {
      strided_load = true;
      gcc_assert (! nested_in_vect_loop);

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      if (!slp && !PURE_SLP_STMT (stmt_info))
        {
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_strided_load_supported (vectype, group_size))
            return false;
        }
    }

  if (negative)
    {
      gcc_assert (!strided_load);
      alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
      if (alignment_support_scheme != dr_aligned
          && alignment_support_scheme != dr_unaligned_supported)
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step but alignment required.");
          return false;
        }

      if (!perm_mask_for_reverse (vectype, NULL))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "negative step and reversing not supported.");
          return false;
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, load_lanes_p, NULL);
      return true;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "transform load. ncopies = %d", ncopies);
  if (strided_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          strided_load = false;
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance))
            slp_perm = true;
        }
      else
        vec_num = group_size;
    }
  else
    group_size = vec_num = 1;

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit strided access):

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_EXTRACT_EVEN_EXPR < vx0, vx1 >
     VS6: vx6 = VEC_EXTRACT_ODD_EXPR < vx0, vx1 >

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_strided_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
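
  /* As a concrete illustration (example only; the variable names below are
     arbitrary), a GROUP_SIZE == 2 interleaving chain corresponds to scalar
     code such as

         for (i = 0; i < n; i++)
           {
             a = in[2*i];        <-- first stmt of the chain
             b = in[2*i+1];
           }

     Two wide vector loads (VS1, VS2 above) fetch 2*nunits consecutive
     elements, and the extract-even/extract-odd permutations (VS5, VS6)
     split them into a vector of 'a' values and a vector of 'b' values.  */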
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = p + indx * vectype_size;

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       p2 = p2 + indx * vectype_size
       vec_dest = realign_load (msq, lsq, realignment_token)  */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
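
  /* As a rough illustration (example only): when an inner loop such as

         for (i = 0; i < n; i++)        <-- outer loop being vectorized
           for (j = 0; j < m; j++)
             ... = a[i][j];

     is vectorized in the context of the outer loop and the row stride of 'a'
     is not a multiple of the vector size, the misalignment of &a[i][0]
     changes from one outer-loop iteration to the next, so the realignment
     data cannot be hoisted to the preheader and must be computed inside the
     loop (COMPUTE_IN_LOOP below).  */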
  if (loop && nested_in_vect_loop_p (loop, stmt)
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = SSA_NAME_DEF_STMT (msq);
          offset = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);
        }
    }

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        dataref_ptr = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                                offset, &dummy, gsi,
                                                &ptr_incr, false, &inv_p);
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (strided_load || slp_perm)
        dr_chain = VEC_alloc (tree, heap, vec_num);

      if (load_lanes_p)
        {
          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          mark_symbols_for_renaming (new_stmt);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              VEC_quick_push (tree, dr_chain, new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_strided_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    struct ptr_info_def *pi;
                    data_ref
                      = build2 (MEM_REF, vectype, dataref_ptr,
                                build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                    pi = get_ptr_info (dataref_ptr);
                    pi->align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      gcc_assert (aligned_access_p (first_dr));
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        pi->align = TYPE_ALIGN_UNIT (elem_type);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        pi->misalign = DR_MISALIGNMENT (first_dr);
                      }
                    break;
                  }
                case dr_explicit_realign:
                  {
                    vs_minus_1 = size_int (TYPE_VECTOR_SUBPARTS (vectype) - 1);

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                                  build_int_cst
                                    (TREE_TYPE (dataref_ptr),
                                     -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);

                    bump = size_binop (MULT_EXPR, vs_minus_1,
                                       TYPE_SIZE_UNIT (scalar_type));
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign_with_ops
                                 (BIT_AND_EXPR, NULL_TREE, ptr,
                                  build_int_cst
                                    (TREE_TYPE (ptr),
                                     -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = make_ssa_name (SSA_NAME_VAR (dataref_ptr), new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  new_stmt = gimple_build_assign_with_ops
                               (BIT_AND_EXPR, NULL_TREE, dataref_ptr,
                                build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                  new_temp = make_ssa_name (SSA_NAME_VAR (dataref_ptr),
                                            new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                  break;
                }

              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              mark_symbols_for_renaming (new_stmt);
              /* 3. Handle explicit realignment if necessary/supported.
                      vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt
                    = gimple_build_assign_with_ops3 (REALIGN_LOAD_EXPR,
                                                     vec_dest, msq, lsq,
                                                     realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }
              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!strided_load);
                  gcc_assert (nested_in_vect_loop_p (loop, stmt));
                  if (j == 0)
                    {
                      tree vec_inv, bitpos, bitsize = TYPE_SIZE (scalar_type);

                      /* CHECKME: bitpos depends on endianess?  */
                      bitpos = bitsize_zero_node;
                      vec_inv = build3 (BIT_FIELD_REF, scalar_type, new_temp,
                                        bitsize, bitpos);
                      vec_dest = vect_create_destination_var (scalar_dest,
                                                              NULL_TREE);
                      new_stmt = gimple_build_assign (vec_dest, vec_inv);
                      new_temp = make_ssa_name (vec_dest, new_stmt);
                      gimple_assign_set_lhs (new_stmt, new_temp);
                      vect_finish_stmt_generation (stmt, new_stmt, gsi);

                      for (k = nunits - 1; k >= 0; --k)
                        t = tree_cons (NULL_TREE, new_temp, t);
                      /* FIXME: use build_constructor directly.  */
                      vec_inv = build_constructor_from_list (vectype, t);
                      new_temp = vect_init_vector (stmt, vec_inv,
                                                   vectype, gsi);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                    }
                  else
                    gcc_unreachable (); /* FORNOW.  */
                }
              if (negative)
                {
                  new_temp = reverse_vec_elements (new_temp, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }

              /* Collect vector loads and later create their permutation in
                 vect_transform_strided_load ().  */
              if (strided_load || slp_perm)
                VEC_quick_push (tree, dr_chain, new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node),
                                new_stmt);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (stmt, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              VEC_free (tree, heap, dr_chain);
              return false;
            }
        }
      else
        {
          if (strided_load)
            {
              if (!load_lanes_p)
                vect_transform_strided_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (dr_chain)
        VEC_free (tree, heap, dr_chain);
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, loop_vinfo, NULL, &lhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, loop_vinfo, NULL, &rhs_def_stmt, &def,
                               &dt))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  return true;
}
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
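
/* As a rough illustration (example only): in a conditional reduction such as

       for (i = 0; i < n; i++)
         x = (a[i] < b[i]) ? x : c[i];

   the carried variable 'x' sits in the then clause, so the caller would pass
   the vectorized reduction variable as REDUC_DEF with REDUC_INDEX 1; had 'x'
   been the else operand, REDUC_INDEX would be 2.  */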
bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, tree reduc_def, int reduc_index)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree op = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum machine_mode vec_mode;
  enum vect_def_type dt, dts[4];
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;

  /* FORNOW: unsupported in basic block SLP.  */
  gcc_assert (loop_vinfo);

  /* FORNOW: SLP not supported.  */
  if (STMT_SLP_TYPE (stmt_info))
    return false;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "value used after loop.");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));
  op = gimple_assign_rhs1 (stmt);
  cond_expr = TREE_OPERAND (op, 0);
  then_clause = TREE_OPERAND (op, 1);
  else_clause = TREE_OPERAND (op, 2);

  if (!vect_is_simple_cond (cond_expr, loop_vinfo))
    return false;

  /* We do not handle two different vector types for the condition
     and the values.  */
  if (!types_compatible_p (TREE_TYPE (TREE_OPERAND (cond_expr, 0)),
                           TREE_TYPE (vectype)))
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, loop_vinfo, NULL,
                               &then_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
           && TREE_CODE (then_clause) != REAL_CST
           && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, loop_vinfo, NULL,
                               &else_def_stmt, &def, &dt))
        return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
           && TREE_CODE (else_clause) != REAL_CST
           && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  vec_mode = TYPE_MODE (vectype);

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (TREE_TYPE (op), vec_mode);
    }
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0)
        {
          gimple gtemp;
          vec_cond_lhs =
            vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                          stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 0), loop_vinfo,
                              NULL, &gtemp, &def, &dts[0]);
          vec_cond_rhs =
            vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                          stmt, NULL);
          vect_is_simple_use (TREE_OPERAND (cond_expr, 1), loop_vinfo,
                              NULL, &gtemp, &def, &dts[1]);
          if (reduc_index == 1)
            vec_then_clause = reduc_def;
          else
            {
              vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                              stmt, NULL);
              vect_is_simple_use (then_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[2]);
            }
          if (reduc_index == 2)
            vec_else_clause = reduc_def;
          else
            {
              vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                              stmt, NULL);
              vect_is_simple_use (else_clause, loop_vinfo,
                                  NULL, &gtemp, &def, &dts[3]);
            }
        }
      else
        {
          vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0], vec_cond_lhs);
          vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1], vec_cond_rhs);
          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_then_clause);
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      vec_compare = build2 (TREE_CODE (cond_expr), vectype,
                            vec_cond_lhs, vec_cond_rhs);
      vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                              vec_compare, vec_then_clause, vec_else_clause);

      new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);
      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "==> examining statement: ");
      print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        fprintf (vect_dump, "not vectorized: stmt has volatile operands");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "irrelevant.");

      return true;
    }
))
4834 case vect_internal_def
:
4837 case vect_reduction_def
:
4838 case vect_nested_cycle
:
4839 gcc_assert (!bb_vinfo
&& (relevance
== vect_used_in_outer
4840 || relevance
== vect_used_in_outer_by_reduction
4841 || relevance
== vect_unused_in_scope
));
4844 case vect_induction_def
:
4845 case vect_constant_def
:
4846 case vect_external_def
:
4847 case vect_unknown_def_type
:
4854 gcc_assert (PURE_SLP_STMT (stmt_info
));
4856 scalar_type
= TREE_TYPE (gimple_get_lhs (stmt
));
4857 if (vect_print_dump_info (REPORT_DETAILS
))
4859 fprintf (vect_dump
, "get vectype for scalar type: ");
4860 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
4863 vectype
= get_vectype_for_scalar_type (scalar_type
);
4866 if (vect_print_dump_info (REPORT_DETAILS
))
4868 fprintf (vect_dump
, "not SLPed: unsupported data-type ");
4869 print_generic_expr (vect_dump
, scalar_type
, TDF_SLIM
);
4874 if (vect_print_dump_info (REPORT_DETAILS
))
4876 fprintf (vect_dump
, "vectype: ");
4877 print_generic_expr (vect_dump
, vectype
, TDF_SLIM
);
4880 STMT_VINFO_VECTYPE (stmt_info
) = vectype
;
  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info));
      *need_to_vectorize = true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_type_promotion (stmt, NULL, NULL, NULL)
          || vectorizable_type_demotion (stmt, NULL, NULL, NULL)
          || vectorizable_conversion (stmt, NULL, NULL, NULL)
          || vectorizable_shift (stmt, NULL, NULL, NULL)
          || vectorizable_operation (stmt, NULL, NULL, NULL)
          || vectorizable_assignment (stmt, NULL, NULL, NULL)
          || vectorizable_load (stmt, NULL, NULL, NULL, NULL)
          || vectorizable_call (stmt, NULL, NULL)
          || vectorizable_store (stmt, NULL, NULL, NULL)
          || vectorizable_reduction (stmt, NULL, NULL, NULL)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0));
  else if (bb_vinfo)
    ok = (vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_store (stmt, NULL, NULL, node));

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized: relevant stmt not ");
          fprintf (vect_dump, "supported: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
        {
          fprintf (vect_dump, "not vectorized: live stmt not ");
          fprintf (vect_dump, "supported: ");
          print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
                     bool *strided_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple orig_stmt_in_pattern, orig_scalar_stmt = stmt;
  bool done;

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
      done = vectorizable_type_demotion (stmt, gsi, &vec_stmt, slp_node);
      break;

    case type_promotion_vec_info_type:
      done = vectorizable_type_promotion (stmt, gsi, &vec_stmt, slp_node);
      break;

    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their vec_stmt_info shouldn't be freed
             meanwhile.  */
          *strided_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0);
      break;

    case call_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_call (stmt, gsi, &vec_stmt);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      break;
    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (vect_print_dump_info (REPORT_DETAILS))
            fprintf (vect_dump, "stmt not supported.");
          gcc_unreachable ();
        }
    }

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
             vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Record the vdef for outer-loop vectorization.");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    {
      STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;
      orig_stmt_in_pattern = STMT_VINFO_RELATED_STMT (stmt_info);
      if (orig_stmt_in_pattern)
        {
          stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt_in_pattern);
          /* STMT was inserted by the vectorizer to replace a computation
             idiom.  ORIG_STMT_IN_PATTERN is a stmt in the original sequence
             that computed this idiom.  We need to record a pointer to
             VEC_STMT in the stmt_info of ORIG_STMT_IN_PATTERN.  See more
             details in the documentation of vect_pattern_recog.  */
          if (STMT_VINFO_IN_PATTERN_P (stmt_vinfo))
            {
              gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo)
                          == orig_scalar_stmt);
              STMT_VINFO_VEC_STMT (stmt_vinfo) = vec_stmt;
            }
        }
    }

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      gsi_remove (&next_si, true);
      tmp = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
                   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res) = VEC_alloc (dr_p, heap, 5);
  STMT_VINFO_INSIDE_OF_LOOP_COST (res) = 0;
  STMT_VINFO_OUTSIDE_OF_LOOP_COST (res) = 0;
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;
  GROUP_READ_WRITE_DEPENDENCE (res) = false;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec);
  stmt_vec_info_vec = VEC_alloc (vec_void_p, heap, 50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  gcc_assert (stmt_vec_info_vec);
  VEC_free (vec_void_p, heap, stmt_vec_info_vec);
}


/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  VEC_free (dr_p, heap, STMT_VINFO_SAME_ALIGN_REFS (stmt_info));
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  enum machine_mode inner_mode = TYPE_MODE (scalar_type);
  enum machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    return NULL_TREE;

  /* If we'd build a vector type of elements whose mode precision doesn't
     match their type's precision we'll get mismatched types on vector
     extracts via BIT_FIELD_REFs.  This effectively means we disable
     vectorization of bool and/or enum types in some languages.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;

  vectype = build_vector_type (scalar_type, nunits);
  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "get vectype with %d units of type ", nunits);
      print_generic_expr (vect_dump, scalar_type, TDF_SLIM);
    }

  if (!vectype)
    return NULL_TREE;

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vectype: ");
      print_generic_expr (vect_dump, vectype, TDF_SLIM);
    }

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "mode not supported by target.");
      return NULL_TREE;
    }

  return vectype;
}
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of the same size as
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}

/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of a stmt in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
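
/* For instance (illustration only): in

       for (i = 0; i < n; i++)
         s += a[i];

   the operand a[i] and any constants or loop invariants are supportable in
   the sense above, while the operand 's' is defined by the previous
   iteration of the loop (a reduction) and is therefore not a supportable
   operand in this sense.  */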
bool
vect_is_simple_use (tree operand, loop_vec_info loop_vinfo,
                    bb_vec_info bb_vinfo, gimple *def_stmt,
                    tree *def, enum vect_def_type *dt)
{
  basic_block bb;
  stmt_vec_info stmt_vinfo;
  struct loop *loop = NULL;

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "vect_is_simple_use: operand ");
      print_generic_expr (vect_dump, operand, TDF_SLIM);
    }

  if (TREE_CODE (operand) == INTEGER_CST || TREE_CODE (operand) == REAL_CST)
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) == PAREN_EXPR)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "non-associatable copy.");
      operand = TREE_OPERAND (operand, 0);
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "not ssa-name.");
      return false;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (*def_stmt == NULL)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "no def_stmt.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    {
      fprintf (vect_dump, "def_stmt: ");
      print_gimple_stmt (vect_dump, *def_stmt, 0, TDF_SLIM);
    }

  /* Empty stmt is expected only in case of a function argument.
     (Otherwise - we expect a phi_node or a GIMPLE_ASSIGN).  */
  if (gimple_nop_p (*def_stmt))
    {
      *dt = vect_external_def;
      return true;
    }

  bb = gimple_bb (*def_stmt);

  if ((loop && !flow_bb_inside_loop_p (loop, bb))
      || (!loop && bb != BB_VINFO_BB (bb_vinfo))
      || (!loop && gimple_code (*def_stmt) == GIMPLE_PHI))
    *dt = vect_external_def;
  else
    {
      stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (*dt == vect_unknown_def_type)
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unsupported pattern.");
      return false;
    }

  if (vect_print_dump_info (REPORT_DETAILS))
    fprintf (vect_dump, "type of def: %d.", *dt);

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      break;

    default:
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "unsupported defining stmt: ");
/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use_1 (tree operand, loop_vec_info loop_vinfo,
                      bb_vec_info bb_vinfo, gimple *def_stmt,
                      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, loop_vinfo, bb_vinfo, def_stmt, def, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);
      if (STMT_VINFO_IN_PATTERN_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - DECL1 and DECL2 are decls of target builtin functions to be used
     when vectorizing the operation, if available.  In this case,
     CODE1 and CODE2 are CALL_EXPR.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */
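
/* As a concrete illustration (example only, assuming 128-bit vector modes):
   widening chars to ints proceeds V16QI -> V8HI -> V4SI, applying the
   hi/lo unpack codes once per step.  MULTI_STEP_CVT is then 1 and
   INTERM_TYPES holds the single intermediate vector type (the V8HI one).  */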
bool
supportable_widening_operation (enum tree_code code, gimple stmt,
                                tree vectype_out, tree vectype_in,
                                tree *decl1, tree *decl2,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                VEC (tree, heap) **interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
  bool ordered_p;
  enum machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;

  /* The result of a vectorized widening operation usually requires two vectors
     (because the widened results do not fit in one vector).  The generated
     vector results would normally be expected to be generated in the same
     order as in the original scalar computation, i.e. if 8 results are
     generated in each vector iteration, they are to be organized as follows:
        vect1: [res1,res2,res3,res4], vect2: [res5,res6,res7,res8].

     However, in the special case that the result of the widening operation is
     used in a reduction computation only, the order doesn't matter (because
     when vectorizing a reduction we change the order of the computation).
     Some targets can take advantage of this and generate more efficient code.
     For example, targets like Altivec, that support widen_mult using a sequence
     of {mult_even,mult_odd} generate the following vectors:
        vect1: [res1,res3,res5,res7], vect2: [res2,res4,res6,res8].

     When vectorizing outer-loops, we execute the inner-loop sequentially
     (each vectorized inner-loop iteration contributes to VF outer-loop
     iterations in parallel).  We therefore don't allow changing the order
     of the computation in the inner-loop during outer-loop vectorization.  */

  if (STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
      && !nested_in_vect_loop_p (vect_loop, stmt))
    ordered_p = false;
  else
    ordered_p = true;

  if (!ordered_p
      && code == WIDEN_MULT_EXPR
      && targetm.vectorize.builtin_mul_widen_even
      && targetm.vectorize.builtin_mul_widen_even (vectype)
      && targetm.vectorize.builtin_mul_widen_odd
      && targetm.vectorize.builtin_mul_widen_odd (vectype))
    {
      if (vect_print_dump_info (REPORT_DETAILS))
        fprintf (vect_dump, "Unordered widening operation detected.");

      *code1 = *code2 = CALL_EXPR;
      *decl1 = targetm.vectorize.builtin_mul_widen_even (vectype);
      *decl2 = targetm.vectorize.builtin_mul_widen_odd (vectype);
      return true;
    }
  switch (code)
    {
    case WIDEN_MULT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_WIDEN_MULT_HI_EXPR;
          c2 = VEC_WIDEN_MULT_LO_EXPR;
        }
      else
        {
          c2 = VEC_WIDEN_MULT_HI_EXPR;
          c1 = VEC_WIDEN_MULT_LO_EXPR;
        }
      break;

    CASE_CONVERT:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_HI_EXPR;
          c2 = VEC_UNPACK_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_HI_EXPR;
          c1 = VEC_UNPACK_LO_EXPR;
        }
      break;

    case FLOAT_EXPR:
      if (BYTES_BIG_ENDIAN)
        {
          c1 = VEC_UNPACK_FLOAT_HI_EXPR;
          c2 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      else
        {
          c2 = VEC_UNPACK_FLOAT_HI_EXPR;
          c1 = VEC_UNPACK_FLOAT_LO_EXPR;
        }
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (wide_vectype)
      || insn_data[icode2].operand[0].mode != TYPE_MODE (wide_vectype))
    {
      int i;
      tree prev_type = vectype, intermediate_type;
      enum machine_mode intermediate_mode, prev_mode = vec_mode;
      optab optab3, optab4;

      if (!CONVERT_EXPR_CODE_P (code))
        return false;

      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
         intermediate steps in promotion sequence.  We try
         MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
         not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
                                                     TYPE_UNSIGNED (prev_type));
          optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
          optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

          if (!optab3 || !optab4
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode2 = optab_handler (optab2, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode2].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (optab3, intermediate_mode))
                  == CODE_FOR_nothing)
              || ((icode2 = optab_handler (optab4, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
              && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;
        }

      return false;
    }

  *code1 = c1;
  *code2 = c2;
  return true;
}
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */
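
/* As a concrete illustration (example only, assuming 128-bit vector modes):
   narrowing ints to chars proceeds V4SI -> V8HI -> V16QI, applying
   VEC_PACK_TRUNC_EXPR once per step on pairs of input vectors.
   MULTI_STEP_CVT is then 1 and INTERM_TYPES holds the intermediate
   (short-element) vector type.  */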
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 VEC (tree, heap) **interm_types)
{
  enum machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  int i;

  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  if (insn_data[icode1].operand[0].mode != TYPE_MODE (narrow_vectype))
    {
      enum machine_mode intermediate_mode, prev_mode = vec_mode;

      prev_type = vectype;
      /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
         intermediate steps in promotion sequence.  We try
         MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
         not.  */
      *interm_types = VEC_alloc (tree, heap, MAX_INTERM_CVT_STEPS);
      for (i = 0; i < 3; i++)
        {
          intermediate_mode = insn_data[icode1].operand[0].mode;
          intermediate_type = lang_hooks.types.type_for_mode (intermediate_mode,
                                                     TYPE_UNSIGNED (prev_type));
          interm_optab = optab_for_tree_code (c1, intermediate_type,
                                              optab_default);
          if (!interm_optab
              || ((icode1 = optab_handler (optab1, prev_mode))
                  == CODE_FOR_nothing)
              || insn_data[icode1].operand[0].mode != intermediate_mode
              || ((icode1 = optab_handler (interm_optab, intermediate_mode))
                  == CODE_FOR_nothing))
            return false;

          VEC_quick_push (tree, *interm_types, intermediate_type);
          (*multi_step_cvt)++;

          if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
            return true;

          prev_type = intermediate_type;
          prev_mode = intermediate_mode;