/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "insn-codes.h"
#include "diagnostic-core.h"
#include "tree-vectorizer.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      add_stmt_info_to_vec (body_cost_vec, count, kind,
			    stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
			    misalign);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
      bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
      void *target_cost_data;

      if (loop_vinfo)
	target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
      else
	target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

      return add_stmt_cost (target_cost_data, count, kind, stmt_info,
			    misalign, where);
    }
}
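/* For example (an illustrative sketch, not part of the original sources):
   costing one unaligned vector store in the loop body either appends an
   entry to a cost vector for later processing

     record_stmt_cost (&body_cost_vec, 1, unaligned_store, stmt_info,
		       DR_MISALIGNMENT (dr), vect_body);

   or, when BODY_COST_VEC is NULL, informs the target cost model directly

     record_stmt_cost (NULL, 1, unaligned_store, stmt_info,
		       DR_MISALIGNMENT (dr), vect_body);

   in both cases returning a preliminary estimate of the cost.  */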
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
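/* As an illustration (not from the original sources): for N == 2 this
   emits the equivalent of

     vect_x.1 = vect_array[2];

   and returns the new SSA name vect_x.1 so later vector stmts can use
   the loaded vector.  */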
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is out of pattern use; if LHS has other uses that are
	     pattern uses, we should mark the stmt itself, and not the pattern
	     stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
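/* A worked example (illustrative, not from the original sources): if a
   widening-multiply pattern replaced

     S1: t = a * b

   by a pattern stmt S1', then marking S1 relevant redirects the marking to
   S1' -- unless t also has non-pattern uses inside the loop, in which case
   S1 itself must be marked and pushed on the worklist.  */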
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
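/* For example (illustrative): in

     for (i = 0; i < n; i++)
       {
	 a[i] = b[i] + 1;	<-- has a vdef: relevant
	 s += b[i];		<-- s used after the loop: live
       }

   the store is marked vect_used_in_scope because it alters memory, and
   the accumulation of s is live because its value is used outside the
   loop (through a loop exit phi).  */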
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- array_ref = var
     -2- var = array_ref
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, stmt, loop_vinfo, NULL, &def_stmt, &def, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
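/* Worked example (illustrative, not from the original sources): for case 3a,
   an outer-loop def feeding an inner-loop use whose STMT was
   vect_unused_in_scope is promoted to vect_used_in_scope only when the
   inner stmt belongs to a nested cycle; otherwise the def stays unused in
   the inner scope, since it need not be vectorized there.  */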
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	   live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	   relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
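/* Illustrative walk-through (not from the original sources): for

     for (i = 0; i < n; i++)
       sum += a[i];

   the loop-closed use of sum seeds its defining stmt into the worklist as
   live; popping it marks the reduction addition vect_used_by_reduction,
   which in turn marks the load of a[i], while the increment of i is only
   used for addressing and is never marked relevant.  */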
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
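/* Worked example (illustrative): a two-step promotion (PWR == 1) of a
   type_promotion stmt charges vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6
   vec_promote_demote operations in the loop body, reflecting that each
   extra widening step doubles the number of instructions required.  */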
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
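/* E.g. (illustrative): for an interleaved group of four stores the first
   store of the group reports a group size of 4 and the remaining three
   report 1, so the group overhead is charged exactly once.  */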
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
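/* Worked example (illustrative): storing an interleaved group of 4 vectors
   with NCOPIES == 2 via permute-and-store charges
   nstmts = 2 * ceil_log2 (4) * 4 = 16 vec_perm operations in addition to
   the cost of the stores themselves.  */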
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
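/* E.g. (illustrative): a strided, non-grouped load of a 4-lane vector with
   NCOPIES == 1 is charged as 4 scalar_load operations plus one
   vec_construct to assemble the loaded lanes into a vector.  */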
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  tree new_var;
  gimple init_stmt;
  tree vec_oprnd;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_unary (VIEW_CONVERT_EXPR, TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_var = vect_get_new_vect_var (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_var, val);
  new_temp = make_ssa_name (new_var, init_stmt);
  gimple_assign_set_lhs (init_stmt, new_temp);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  vec_oprnd = gimple_assign_lhs (init_stmt);
  return vec_oprnd;
}
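/* E.g. (illustrative, not from the original sources): splatting the
   constant 7 into a four-element integer vector type emits in the loop
   preheader the equivalent of

     cst_.5 = { 7, 7, 7, 7 };

   and returns the SSA name holding the splat for use in the vectorized
   stmt.  */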
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  */

tree
vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
{
  tree vec_oprnd;
  gimple vec_stmt;
  gimple def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  tree def;
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
				      &def_stmt, &def, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def)
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "def =  ");
	  loc_printed = 1;
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
	  dump_printf (MSG_NOTE, "\n");
	}
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt =  ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* Case 1: operand is a constant.  */
    case vect_constant_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
	gcc_assert (vector_type);
	nunits = TYPE_VECTOR_SUBPARTS (vector_type);

	if (scalar_def)
	  *scalar_def = op;

	/* Create 'vect_cst_ = {cst,cst,...,cst}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "Create vector_cst. nunits = %d\n", nunits);

	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* Case 2: operand is defined outside the loop - loop invariant.  */
    case vect_external_def:
      {
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (def));
	gcc_assert (vector_type);

	if (scalar_def)
	  *scalar_def = def;

	/* Create 'vec_inv = {inv,inv,..,inv}'  */
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");

	return vect_init_vector (stmt, def, vector_type, NULL);
      }

    /* Case 3: operand is defined inside the loop.  */
    case vect_internal_def:
      {
	if (scalar_def)
	  *scalar_def = NULL/* FIXME tuples: def_stmt*/;

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* Case 4: operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      {
	struct loop *loop;

	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);
	loop = (gimple_bb (def_stmt))->loop_father;

	/* Get the def before the loop  */
	op = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
	return get_initial_def_for_reduction (stmt, op, scalar_def);
      }

    /* Case 5: operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */
tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt, NULL);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt, NULL);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, loop_vinfo,
						   bb_vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
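/* Note (illustrative): inserting a vectorized store before an existing
   scalar store whose virtual operands are .MEM_3 -> .MEM_4 rewires the
   chain to .MEM_3 -> new store (defining .MEM_5) -> scalar store using
   .MEM_5, so the virtual SSA web stays valid without rerunning the
   renamer.  */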
/* Checks if CALL can be vectorized in type VECTYPE.  Returns
   a function declaration if the target has a vectorized version
   of the function, or NULL_TREE if the function cannot be vectorized.  */

tree
vectorizable_function (gcall *call, tree vectype_out, tree vectype_in)
{
  tree fndecl = gimple_call_fndecl (call);

  /* We only handle functions that do not read or clobber memory -- i.e.
     const or novops ones.  */
  if (!(gimple_call_flags (call) & (ECF_CONST | ECF_NOVOPS)))
    return NULL_TREE;

  if (!fndecl
      || TREE_CODE (fndecl) != FUNCTION_DECL
      || !DECL_BUILT_IN (fndecl))
    return NULL_TREE;

  return targetm.vectorize.builtin_vectorized_function (fndecl, vectype_out,
							vectype_in);
}


static tree permute_vec_elements (tree, tree, tree, gimple,
				  gimple_stmt_iterator *);
1742 /* Function vectorizable_mask_load_store.
1744 Check if STMT performs a conditional load or store that can be vectorized.
1745 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1746 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1747 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1750 vectorizable_mask_load_store (gimple stmt
, gimple_stmt_iterator
*gsi
,
1751 gimple
*vec_stmt
, slp_tree slp_node
)
1753 tree vec_dest
= NULL
;
1754 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1755 stmt_vec_info prev_stmt_info
;
1756 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1757 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1758 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1759 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1760 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1764 tree dataref_ptr
= NULL_TREE
;
1766 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1770 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1771 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1772 int gather_scale
= 1;
1773 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1778 enum vect_def_type dt
;
1780 if (slp_node
!= NULL
)
1783 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1784 gcc_assert (ncopies
>= 1);
1786 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1787 mask
= gimple_call_arg (stmt
, 2);
1788 if (TYPE_PRECISION (TREE_TYPE (mask
))
1789 != GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype
))))
1792 /* FORNOW. This restriction should be relaxed. */
1793 if (nested_in_vect_loop
&& ncopies
> 1)
1795 if (dump_enabled_p ())
1796 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1797 "multiple types in nested loop.");
1801 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1804 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
1807 if (!STMT_VINFO_DATA_REF (stmt_info
))
1810 elem_type
= TREE_TYPE (vectype
);
1812 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1815 if (STMT_VINFO_STRIDED_P (stmt_info
))
1818 if (STMT_VINFO_GATHER_P (stmt_info
))
1822 gather_decl
= vect_check_gather (stmt
, loop_vinfo
, &gather_base
,
1823 &gather_off
, &gather_scale
);
1824 gcc_assert (gather_decl
);
1825 if (!vect_is_simple_use_1 (gather_off
, NULL
, loop_vinfo
, NULL
,
1826 &def_stmt
, &def
, &gather_dt
,
1827 &gather_off_vectype
))
1829 if (dump_enabled_p ())
1830 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1831 "gather index use not simple.");
1835 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1837 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1838 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1840 if (dump_enabled_p ())
1841 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1842 "masked gather with integer mask not supported.");
1846 else if (tree_int_cst_compare (nested_in_vect_loop
1847 ? STMT_VINFO_DR_STEP (stmt_info
)
1848 : DR_STEP (dr
), size_zero_node
) <= 0)
1850 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1851 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
), !is_store
))
1854 if (TREE_CODE (mask
) != SSA_NAME
)
1857 if (!vect_is_simple_use (mask
, stmt
, loop_vinfo
, NULL
,
1858 &def_stmt
, &def
, &dt
))
1863 tree rhs
= gimple_call_arg (stmt
, 3);
1864 if (!vect_is_simple_use (rhs
, stmt
, loop_vinfo
, NULL
,
1865 &def_stmt
, &def
, &dt
))
1869 if (!vec_stmt
) /* transformation not required. */
1871 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1873 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1876 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1882 if (STMT_VINFO_GATHER_P (stmt_info
))
1884 tree vec_oprnd0
= NULL_TREE
, op
;
1885 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1886 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1887 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1888 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1889 tree mask_perm_mask
= NULL_TREE
;
1890 edge pe
= loop_preheader_edge (loop
);
1893 enum { NARROW
, NONE
, WIDEN
} modifier
;
1894 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1896 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1897 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1898 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1899 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1900 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1901 scaletype
= TREE_VALUE (arglist
);
1902 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1903 && types_compatible_p (srctype
, masktype
));
1905 if (nunits
== gather_off_nunits
)
1907 else if (nunits
== gather_off_nunits
/ 2)
1909 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1912 for (i
= 0; i
< gather_off_nunits
; ++i
)
1913 sel
[i
] = i
| nunits
;
1915 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1917 else if (nunits
== gather_off_nunits
* 2)
1919 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1922 for (i
= 0; i
< nunits
; ++i
)
1923 sel
[i
] = i
< gather_off_nunits
1924 ? i
: i
+ nunits
- gather_off_nunits
;
1926 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1928 for (i
= 0; i
< nunits
; ++i
)
1929 sel
[i
] = i
| gather_off_nunits
;
1930 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
    }
  else
    gcc_unreachable ();

  vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

  ptr = fold_convert (ptrtype, gather_base);
  if (!is_gimple_min_invariant (ptr))
    {
      ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
      new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
      gcc_assert (!new_bb);
    }

  scale = build_int_cst (scaletype, gather_scale);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      if (modifier == WIDEN && (j & 1))
	op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				   perm_mask, stmt, gsi);
      else if (j == 0)
	op = vec_oprnd0
	  = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
      else
	op = vec_oprnd0
	  = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

      if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	{
	  gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
		      == TYPE_VECTOR_SUBPARTS (idxtype));
	  var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
	  var = make_ssa_name (var);
	  op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	  new_stmt
	    = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  op = var;
	}

      if (mask_perm_mask && (j & 1))
	mask_op = permute_vec_elements (mask_op, mask_op,
					mask_perm_mask, stmt, gsi);
      else
	{
	  if (j == 0)
	    vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	  else
	    {
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL,
				  &def_stmt, &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	    }

	  mask_op = vec_mask;
	  if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
			  == TYPE_VECTOR_SUBPARTS (masktype));
	      var = vect_get_new_vect_var (masktype, vect_simple_var, NULL);
	      var = make_ssa_name (var);
	      mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      mask_op = var;
	    }
	}

      new_stmt
	= gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
			     scale);

      if (!useless_type_conversion_p (vectype, rettype))
	{
	  gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
		      == TYPE_VECTOR_SUBPARTS (rettype));
	  var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
	  op = make_ssa_name (var, new_stmt);
	  gimple_call_set_lhs (new_stmt, op);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  var = make_ssa_name (vec_dest);
	  op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	  new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	}
      else
	{
	  var = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, var);
	}

      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (modifier == NARROW)
	{
	  if ((j & 1) == 0)
	    {
	      prev_res = var;
	      continue;
	    }
	  var = permute_vec_elements (prev_res, var,
				      perm_mask, stmt, gsi);
	  new_stmt = SSA_NAME_DEF_STMT (var);
	}

      if (prev_stmt_info == NULL)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
     from the IL.  */
  tree lhs = gimple_call_lhs (stmt);
  new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  return true;
    }
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt, NULL);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
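	  /* The call built above is an internal function; in the IL it
	     appears as, e.g. (illustrative):

		 MASK_STORE (dataref_ptr, align, vec_mask, vec_rhs);

	     only lanes whose mask element is nonzero write to memory
	     when it is expanded to a target masked-store insn.  */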
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt, NULL);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, NULL, loop_vinfo, NULL, &def_stmt,
				  &def, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
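	  /* Likewise the vectorized load is an internal-function call,
	     e.g. (illustrative):

		 vect__1.2_3 = MASK_LOAD (dataref_ptr, align, vec_mask);

	     lanes whose mask element is zero do not touch memory.  */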
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
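/* For example (illustrative only; the builtin is chosen by the target
   hook), with a vectorization factor of 4 a loop body containing

       a[i] = copysignf (b[i], c[i]);

   can be transformed into one call per vector iteration, e.g.

       vect_a = __builtin_ia32_copysignps (vect_b, vect_c);

   where vect_b and vect_c are the V4SF vector defs of the scalar
   arguments.  */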
static bool
vectorizable_call (gimple gs, gimple_stmt_iterator *gsi, gimple *vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree fndecl, new_temp, def, rhs_type;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?  */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
				 &def_stmt, &def, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = vectorizable_function (stmt, vectype_out, vectype_in);
  if (fndecl == NULL_TREE)
    {
      if (gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  gcc_assert (!gimple_vuse (stmt));

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt, NULL);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_vect_var (vectype_out, vect_simple_var, "cst_");
	      gimple init_stmt = gimple_build_assign (new_var, cst);
	      new_temp = make_ssa_name (new_var, init_stmt);
	      gimple_assign_set_lhs (init_stmt, new_temp);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp,
					      gimple_assign_lhs (init_stmt));
	    }
	  else
	    {
	      new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      break;

    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt, NULL);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      break;

    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */
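/* E.g. (illustrative IL) a pointer argument computed as

       _2 = GOMP_SIMD_LANE (simduid.0_1(D));
       _3 = _2 * 4;
       p_4 = &base + _3;

   is linear in the simd lane with step 4, even though it is not an
   induction variable of the loop itself.  */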
static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
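/* For example (illustrative; clone names follow the target vector
   ABI), given

       #pragma omp declare simd
       int foo (int x, int y);

   a call foo (a[i], b[i]) in a loop vectorized with VF == 4 can be
   replaced by one call to a simdlen-4 clone, roughly

       vect_res = _ZGVbN4vv_foo (vect_a, vect_b);

   with vector defs of the scalar arguments passed in.  */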
static bool
vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
			      gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp, def;
  gimple def_stmt;
  gimple new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
				 &def_stmt, &def, &thisarginfo.dt,
				 &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }

  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt, NULL);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt, NULL);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo,
							 NULL));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, loop_vinfo,
							 NULL));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
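/* E.g. (illustrative) for a widening multiplication of V8HI operands
   the two generated halves are

       vect_lo = VEC_WIDEN_MULT_LO_EXPR <vect_b, vect_c>;
       vect_hi = VEC_WIDEN_MULT_HI_EXPR <vect_b, vect_c>;

   each producing one V4SI result vector.  */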
static gimple
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple stmt)
{
  gimple new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */
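/* E.g. (illustrative) a two-step narrowing needs four input vectors:
   the first is obtained with vect_get_vec_def_for_operand and each
   subsequent one chains through vect_get_vec_def_for_stmt_copy on
   the previous def, two defs per recursion level.  */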
static void
vect_get_loop_based_defs (tree *oprnd, gimple stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt, NULL);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the
   function recursively.  */
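/* E.g. (illustrative) a V8HI -> V16QI demotion packs two source
   vectors into one:

       vect_q = VEC_PACK_TRUNC_EXPR <vect_h0, vect_h1>;

   and a two-step demotion (say V4SI -> V16QI) first packs to the
   intermediate V8HI type and then recurses on the halved operand
   list.  */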
static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
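/* E.g. (illustrative) a V16QI -> V8HI promotion produces two
   destination vectors per source vector:

       vect_h0 = VEC_UNPACK_LO_EXPR <vect_q>;
       vect_h1 = VEC_UNPACK_HI_EXPR <vect_q>;

   so the operand vector doubles in length on each step.  */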
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple new_stmt1, new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
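/* E.g. (illustrative) converting short to float with V8HI inputs is
   handled in two steps: the V8HI vector is first widened into two
   V4SI vectors and each of those is then converted with FLOAT_EXPR
   to V4SF.  The cvt_type/interm_types machinery below tracks such
   intermediate types.  */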
static bool
vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
			 gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if ((INTEGRAL_TYPE_P (lhs_type)
       && (TYPE_PRECISION (lhs_type)
	   != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
      || (INTEGRAL_TYPE_P (rhs_type)
	  && (TYPE_PRECISION (rhs_type)
	      != GET_MODE_PRECISION (TYPE_MODE (rhs_type)))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
			     &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo,
				   &def_stmt, &def, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
				 &def, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }

  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt,
								   NULL);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
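/* Copies include no-op conversions; e.g. (illustrative) for

       int_a = (int) unsigned_b;

   with V4SI vectors on both sides, the vectorized statement is just a
   VIEW_CONVERT_EXPR of the source vector def, since the bit-pattern
   is unchanged.  */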
static bool
vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
			 gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use_1 (op, stmt, loop_vinfo, bb_vinfo,
			     &def_stmt, &def, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */
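/* Illustrative sketch (an assumed example, not from the original sources):

       a[i] = b[i] << 3;      // uniform amount: optab_scalar may match
       a[i] = b[i] << c[i];   // per-element amount: optab_vector needed

   the first form can use a vector-shifted-by-scalar pattern when the
   target provides one; the second requires vector-by-vector shifts.  */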
bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
          || (optab_handler (optab, TYPE_MODE (vectype))
              == CODE_FOR_nothing))
        return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
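/* Illustrative sketch (an assumed example, not from the original sources):

       for (i = 0; i < n; i++)
         a[i] = b[i] >> k;

   becomes, per copy, either

       va = vb >> k;            // vector/scalar form, or
       va = vb >> {k,k,...};    // vector/vector form with k splat.  */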
static bool
vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
                    gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use_1 (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                             &def, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if (dt[1] == vect_internal_def && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
      if (slp_node)
        {
          vec<gimple> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
          gimple slpstmt;

          FOR_EACH_VEC_ELT (stmts, k, slpstmt)
            if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
              scalar_shift_arg = false;
        }
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
        op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
          || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
          return false;
        }
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
            {
              scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of long long/long/short/char.  */
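              /* Illustrative sketch (an assumed example, not from the
                 original sources):

                     vector long long va, vb;
                     va = vb << 5;   // 5 must become (long long) 5 here

                 before it can be splat into a vector shift amount.  */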
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
              else if (!useless_type_conversion_p (TREE_TYPE (vectype),
                                                   TREE_TYPE (op1)))
                {
                  if (slp_node
                      && TYPE_MODE (TREE_TYPE (vectype))
                         != TYPE_MODE (TREE_TYPE (op1)))
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
                      return false;
                    }
                  /* If vec_stmt is NULL, we are in the analysis phase.  */
                  if (vec_stmt && !slp_node)
                    {
                      op1 = fold_convert (TREE_TYPE (vectype), op1);
                      op1 = vect_init_vector (stmt, op1,
                                              TREE_TYPE (vectype), NULL);
                    }
                }
            }
        }
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "operand 1 using scalar mode.\n");
                  vec_oprnd1 = op1;
                  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
                  vec_oprnds1.quick_push (vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = vec_oprnds1[i];
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
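/* Illustrative sketch (an assumed example, not from the original sources):
   the binary case

       for (i = 0; i < n; i++)
         z[i] = x[i] + y[i];

   is replaced by ncopies vector stmts of the form

       vz = vx + vy;

   and a ternary operation takes a third vector operand vw as well.  */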
static bool
vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
                        gimple *vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  int icode;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use_1 (op0, stmt, loop_vinfo, bb_vinfo,
                             &def_stmt, &def, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt[1]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                               &def, &dt[2]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    {
      if (can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype)))
        icode = LAST_INSN_CODE;
      else
        icode = CODE_FOR_nothing;
    }
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }
      icode = (int) optab_handler (optab, vec_mode);
    }

  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
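  /* Sketch of the resulting loop body (illustration only, not from the
     original sources), for VF=16 and nunits=4:

         for (i = 0; i < n; i += 16)
           {
             vz0 = vx0 + v1;
             vz1 = vx1 + v1;
             vz2 = vx2 + v1;
             vz3 = vx3 + v1;
           }
  */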
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          if (op_type == ternary_op)
            {
              vec_oprnds2.create (1);
              vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
                                                                    stmt,
                                                                    NULL));
            }
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = vec_oprnds2.pop ();
              vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
                                                                      vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? vec_oprnds2[i] : NULL_TREE);
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (((dataref_aux *)dr->aux)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = ((dataref_aux *)dr->aux)->base_decl;

      if (decl_in_symtab_p (base_decl))
        symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
        {
          DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
          DECL_USER_ALIGN (base_decl) = 1;
        }
      ((dataref_aux *)dr->aux)->base_misaligned = false;
    }
}
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */
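/* Illustration (an assumed example, not from the original sources): for a
   four-element vector type the reversal mask is

       sel = { 3, 2, 1, 0 }

   so that VEC_PERM_EXPR <v, v, sel> yields v with its elements in
   reverse order.  */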
static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
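/* Illustrative sketch (an assumed example, not from the original sources):
   the scalar store

       for (i = 0; i < n; i++)
         a[i] = x[i];

   becomes, per copy, a single vector store

       MEM_REF[dataref_ptr] = vx;

   with grouped and strided accesses handled specially below.  */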
static bool
vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree def;
  gimple def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple ptr_incr = NULL;
  int ncopies;
  int j;
  gimple next_stmt, first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  tree aggr_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, stmt, loop_vinfo, bb_vinfo, &def_stmt,
                           &def, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
        tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
                              ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
                              size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }
      if (negative)
        {
          gcc_assert (!grouped_store);
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (dt != vect_constant_def
              && dt != vect_external_def
              && !perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported.\n");
              return false;
            }
        }
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
          && !PURE_SLP_STMT (stmt_info)
          && !STMT_VINFO_STRIDED_P (stmt_info))
        {
          if (vect_store_lanes_supported (vectype, group_size))
            store_lanes_p = true;
          else if (!vect_grouped_store_supported (vectype, group_size))
            return false;
        }

      if (STMT_VINFO_STRIDED_P (stmt_info)
          && (slp || PURE_SLP_STMT (stmt_info))
          && (group_size > nunits
              || nunits % group_size != 0))
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unhandled strided group store\n");
          return false;
        }

      if (first_stmt == stmt)
        {
          /* STMT is the leader of the group. Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
              gcc_assert (gimple_assign_single_p (next_stmt));
              op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, next_stmt, loop_vinfo, bb_vinfo,
                                       &def_stmt, &def, &dt))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "use not simple.\n");
                  return false;
                }
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
                               NULL, NULL, NULL);
      return true;
    }

  /* Transform.  */

  ensure_base_align (stmt_info, dr);

  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
        = fold_build_pointer_plus
            (unshare_expr (DR_BASE_ADDRESS (first_dr)),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
                         convert_to_ptrofftype (DR_INIT(first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             array[i] = ...;

         we generate a new induction variable and new stores from
         the components of the (vectorized) rhs:

           for (j = 0; ; j += VF*stride)
             vectemp = ...;
             tmp1 = vectemp[0];
             array[j] = tmp1;
             tmp2 = vectemp[1];
             array[j + stride] = tmp2;
             ...
         */

      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
        {
          nstores = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (elem_type, group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_size = 1;
        }

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
                            build_int_cst (TREE_TYPE (ivstep),
                                           ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
        {
          running_off = offvar;
          if (g)
            {
              tree size = TYPE_SIZE_UNIT (ltype);
              tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
                                      size);
              tree newoff = copy_ssa_name (running_off, NULL);
              incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                          running_off, pos);
              vect_finish_stmt_generation (stmt, incr, gsi);
              running_off = newoff;
            }
          for (j = 0; j < ncopies; j++)
            {
              /* We've set op and dt above, from gimple_assign_rhs1(stmt),
                 and first_stmt == stmt.  */
              if (j == 0)
                {
                  if (slp)
                    {
                      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
                                         slp_node, -1);
                      vec_oprnd = vec_oprnds[0];
                    }
                  else
                    {
                      gcc_assert (gimple_assign_single_p (next_stmt));
                      op = gimple_assign_rhs1 (next_stmt);
                      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                                                                NULL);
                    }
                }
              else
                {
                  if (slp)
                    vec_oprnd = vec_oprnds[j];
                  else
                    {
                      vect_is_simple_use (vec_oprnd, NULL, loop_vinfo,
                                          bb_vinfo, &def_stmt, &def, &dt);
                      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
                    }
                }

              for (i = 0; i < nstores; i++)
                {
                  tree newref, newoff;
                  gimple incr, assign;
                  tree size = TYPE_SIZE (ltype);
                  /* Extract the i'th component.  */
                  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
                                          bitsize_int (i), size);
                  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
                                           size, pos);

                  elem = force_gimple_operand_gsi (gsi, elem, true,
                                                   NULL_TREE, true,
                                                   GSI_SAME_STMT);

                  newref = build2 (MEM_REF, ltype,
                                   running_off, alias_off);

                  /* And store it to *running_off.  */
                  assign = gimple_build_assign (newref, elem);
                  vect_finish_stmt_generation (stmt, assign, gsi);

                  newoff = copy_ssa_name (running_off, NULL);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                  if (g == group_size - 1
                      && !slp)
                    {
                      if (j == 0 && i == 0)
                        STMT_VINFO_VEC_STMT (stmt_info)
                          = *vec_stmt = assign;
                      else
                        STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
                      prev_stmt_info = vinfo_for_stmt (assign);
                    }
                }
            }
          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
        }
      return true;
    }

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
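  /* Source-level sketch of such an interleaved group (illustration only,
     not from the original sources):

         for (i = 0; i < n; i++)
           {
             a[4*i]     = x0[i];
             a[4*i + 1] = x1[i];
             a[4*i + 2] = x2[i];
             a[4*i + 3] = x3[i];
           }

     where all four stores form one chain with group_size == 4.  */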
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      gimple new_stmt;

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node, -1);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt,
                                                            NULL);
                  dr_chain.quick_push (vec_oprnd);
                  oprnds.quick_push (vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have catched mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr))))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type,
                                          simd_lane_access_p ? loop : NULL,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = oprnds[i];
              vect_is_simple_use (op, NULL, loop_vinfo, bb_vinfo, &def_stmt,
                                  &def, &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              dr_chain[i] = vec_oprnd;
              oprnds[i] = vec_oprnd;
            }
          if (dataref_offset)
            dataref_offset
              = int_const_binop (PLUS_EXPR, dataref_offset,
                                 TYPE_SIZE_UNIT (aggr_type));
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           TYPE_SIZE_UNIT (aggr_type));
        }

      if (store_lanes_p)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = dr_chain[i];
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
          gimple_call_set_lhs (new_stmt, data_ref);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
        }
      else
        {
          new_stmt = NULL;
          if (grouped_store)
            {
              if (j == 0)
                result_chain.create (group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              unsigned align, misalign;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = result_chain[i];

              data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
                                      dataref_ptr,
                                      dataref_offset
                                      ? dataref_offset
                                      : build_int_cst (reference_alias_ptr_type
                                                       (DR_REF (first_dr)), 0));
              align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  align = TYPE_ALIGN_UNIT (elem_type);
                  misalign = 0;
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  misalign = DR_MISALIGNMENT (first_dr);
                }
              if (dataref_offset == NULL_TREE
                  && TREE_CODE (dataref_ptr) == SSA_NAME)
                set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                        misalign);

              if (negative
                  && dt != vect_constant_def
                  && dt != vect_external_def)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  tree perm_dest
                    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
                                                   vectype);
                  tree new_temp = make_ssa_name (perm_dest);

                  /* Generate the permute statement.  */
                  gimple perm_stmt
                    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
                                           vec_oprnd, perm_mask);
                  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

                  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
                  vec_oprnd = new_temp;
                }

              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }
      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */
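/* Usage sketch (illustration only, not from the original sources): for a
   four-element vector an even/odd interleave of X with Y uses

       unsigned char sel[4] = { 0, 4, 1, 5 };
       tree mask = vect_gen_perm_mask_checked (vectype, sel);

   which selects elements 0 and 1 of X interleaved with elements 0 and 1
   of Y.  */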
tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */
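/* Semantics sketch (illustration only, not from the original sources):
   with N = TYPE_VECTOR_SUBPARTS, the generated VEC_PERM_EXPR computes

       for (i = 0; i < N; i++)
         result[i] = mask[i] < N ? x[mask[i]] : y[mask[i] - N];
*/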
static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */
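/* Illustrative sketch (an assumed example, not from the original sources):
   in

       for (i = 0; i < n; i++)
         {
           p = base + 4;   // definition only feeding an invariant load
           x = *p;
           ...
         }

   the definition of p can be moved to the preheader, so the load itself
   may then be hoisted as loop invariant.  */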
static bool
hoist_defs_of_uses (gimple stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
          tree op2;
          ssa_op_iter i2;
          if (gimple_code (def_stmt) == GIMPLE_PHI)
            return false;
          FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
            {
              gimple def_stmt2 = SSA_NAME_DEF_STMT (op2);
              if (!gimple_nop_p (def_stmt2)
                  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
                return false;
            }
          any = true;
        }
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
          gsi_remove (&gsi, false);
          gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
        }
    }

  return true;
}
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
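/* Illustrative sketch (an assumed example, not from the original sources):
   the scalar load

       for (i = 0; i < n; i++)
         x = a[i];

   becomes, per copy, a vector load

       vx = MEM_REF[dataref_ptr];

   with grouped, strided and gather accesses handled specially below.  */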
5932 vectorizable_load (gimple stmt
, gimple_stmt_iterator
*gsi
, gimple
*vec_stmt
,
5933 slp_tree slp_node
, slp_instance slp_node_instance
)
5936 tree vec_dest
= NULL
;
5937 tree data_ref
= NULL
;
5938 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5939 stmt_vec_info prev_stmt_info
;
5940 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5941 struct loop
*loop
= NULL
;
5942 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
5943 bool nested_in_vect_loop
= false;
5944 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
5948 gimple new_stmt
= NULL
;
5950 enum dr_alignment_support alignment_support_scheme
;
5951 tree dataref_ptr
= NULL_TREE
;
5952 tree dataref_offset
= NULL_TREE
;
5953 gimple ptr_incr
= NULL
;
5955 int i
, j
, group_size
= -1, group_gap_adj
;
5956 tree msq
= NULL_TREE
, lsq
;
5957 tree offset
= NULL_TREE
;
5958 tree byte_offset
= NULL_TREE
;
5959 tree realignment_token
= NULL_TREE
;
5961 vec
<tree
> dr_chain
= vNULL
;
5962 bool grouped_load
= false;
5963 bool load_lanes_p
= false;
5966 bool negative
= false;
5967 bool compute_in_loop
= false;
5968 struct loop
*at_loop
;
5970 bool slp
= (slp_node
!= NULL
);
5971 bool slp_perm
= false;
5972 enum tree_code code
;
5973 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5976 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
5977 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
5978 int gather_scale
= 1;
5979 enum vect_def_type gather_dt
= vect_unknown_def_type
;
5981 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5984 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
5987 /* Is vectorizable load? */
5988 if (!is_gimple_assign (stmt
))
5991 scalar_dest
= gimple_assign_lhs (stmt
);
5992 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
5995 code
= gimple_assign_rhs_code (stmt
);
5996 if (code
!= ARRAY_REF
5997 && code
!= BIT_FIELD_REF
5998 && code
!= INDIRECT_REF
5999 && code
!= COMPONENT_REF
6000 && code
!= IMAGPART_EXPR
6001 && code
!= REALPART_EXPR
6003 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6006 if (!STMT_VINFO_DATA_REF (stmt_info
))
6009 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6010 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6014 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6015 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6016 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6021 /* Multiple types in SLP are handled by creating the appropriate number of
6022 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6024 if (slp
|| PURE_SLP_STMT (stmt_info
))
6027 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6029 gcc_assert (ncopies
>= 1);
6031 /* FORNOW. This restriction should be relaxed. */
6032 if (nested_in_vect_loop
&& ncopies
> 1)
6034 if (dump_enabled_p ())
6035 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6036 "multiple types in nested loop.\n");
6040 /* Invalidate assumptions made by dependence analysis when vectorization
6041 on the unrolled body effectively re-orders stmts. */
6043 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6044 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6045 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6047 if (dump_enabled_p ())
6048 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6049 "cannot perform implicit CSE when unrolling "
6050 "with negative dependence distance\n");
6054 elem_type
= TREE_TYPE (vectype
);
6055 mode
= TYPE_MODE (vectype
);
6057 /* FORNOW. In some cases can vectorize even if data-type not supported
6058 (e.g. - data copies). */
6059 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
6061 if (dump_enabled_p ())
6062 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6063 "Aligned load, but unsupported type.\n");
6067 /* Check if the load is a part of an interleaving chain. */
6068 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6070 grouped_load
= true;
6072 gcc_assert (! nested_in_vect_loop
&& !STMT_VINFO_GATHER_P (stmt_info
));
6074 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6076 /* If this is single-element interleaving with an element distance
6077 that leaves unused vector loads around punt - we at least create
6078 very sub-optimal code in that case (and blow up memory,
6080 if (first_stmt
== stmt
6081 && !GROUP_NEXT_ELEMENT (stmt_info
)
6082 && GROUP_SIZE (stmt_info
) > TYPE_VECTOR_SUBPARTS (vectype
))
6084 if (dump_enabled_p ())
6085 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6086 "single-element interleaving not supported "
6087 "for not adjacent vector loads\n");
6091 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6094 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6096 && !PURE_SLP_STMT (stmt_info
)
6097 && !STMT_VINFO_STRIDED_P (stmt_info
))
6099 if (vect_load_lanes_supported (vectype
, group_size
))
6100 load_lanes_p
= true;
6101 else if (!vect_grouped_load_supported (vectype
, group_size
))
6105 /* Invalidate assumptions made by dependence analysis when vectorization
6106 on the unrolled body effectively re-orders stmts. */
6107 if (!PURE_SLP_STMT (stmt_info
)
6108 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6109 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6110 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6112 if (dump_enabled_p ())
6113 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6114 "cannot perform implicit CSE when performing "
6115 "group loads with negative dependence distance\n");
6119 /* Similarly when the stmt is a load that is both part of a SLP
6120 instance and a loop vectorized stmt via the same-dr mechanism
6121 we have to give up. */
6122 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
6123 && (STMT_SLP_TYPE (stmt_info
)
6124 != STMT_SLP_TYPE (vinfo_for_stmt
6125 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
6127 if (dump_enabled_p ())
6128 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6129 "conflicting SLP types for CSEd load\n");
6135 if (STMT_VINFO_GATHER_P (stmt_info
))
6139 gather_decl
= vect_check_gather (stmt
, loop_vinfo
, &gather_base
,
6140 &gather_off
, &gather_scale
);
6141 gcc_assert (gather_decl
);
6142 if (!vect_is_simple_use_1 (gather_off
, NULL
, loop_vinfo
, bb_vinfo
,
6143 &def_stmt
, &def
, &gather_dt
,
6144 &gather_off_vectype
))
6146 if (dump_enabled_p ())
6147 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6148 "gather index use not simple.\n");
6152 else if (STMT_VINFO_STRIDED_P (stmt_info
))
6155 && (slp
|| PURE_SLP_STMT (stmt_info
)))
6156 && (group_size
> nunits
6157 || nunits
% group_size
!= 0))
6159 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6160 "unhandled strided group load\n");
6166 negative
= tree_int_cst_compare (nested_in_vect_loop
6167 ? STMT_VINFO_DR_STEP (stmt_info
)
6169 size_zero_node
) < 0;
6170 if (negative
&& ncopies
> 1)
6172 if (dump_enabled_p ())
6173 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6174 "multiple types with negative step.\n");
6182 if (dump_enabled_p ())
6183 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6184 "negative step for group load not supported"
6188 alignment_support_scheme
= vect_supportable_dr_alignment (dr
, false);
6189 if (alignment_support_scheme
!= dr_aligned
6190 && alignment_support_scheme
!= dr_unaligned_supported
)
6192 if (dump_enabled_p ())
6193 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6194 "negative step but alignment required.\n");
6197 if (!perm_mask_for_reverse (vectype
))
6199 if (dump_enabled_p ())
6200 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6201 "negative step and reversing not supported."
6208 if (!vec_stmt
) /* transformation not required. */
6210 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
6211 /* The SLP costs are calculated during SLP analysis. */
6212 if (!PURE_SLP_STMT (stmt_info
))
6213 vect_model_load_cost (stmt_info
, ncopies
, load_lanes_p
,
6218 if (dump_enabled_p ())
6219 dump_printf_loc (MSG_NOTE
, vect_location
,
6220 "transform load. ncopies = %d\n", ncopies
);
6224 ensure_base_align (stmt_info
, dr
);
6226 if (STMT_VINFO_GATHER_P (stmt_info
))
6228 tree vec_oprnd0
= NULL_TREE
, op
;
6229 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
6230 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
6231 tree ptr
, mask
, var
, scale
, merge
, perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
6232 edge pe
= loop_preheader_edge (loop
);
6235 enum { NARROW
, NONE
, WIDEN
} modifier
;
6236 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
6238 if (nunits
== gather_off_nunits
)
6240 else if (nunits
== gather_off_nunits
/ 2)
6242 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
6245 for (i
= 0; i
< gather_off_nunits
; ++i
)
6246 sel
[i
] = i
| nunits
;
6248 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
6250 else if (nunits
== gather_off_nunits
* 2)
6252 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
6255 for (i
= 0; i
< nunits
; ++i
)
6256 sel
[i
] = i
< gather_off_nunits
6257 ? i
: i
+ nunits
- gather_off_nunits
;
6259 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional gather loads,
	 so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
	{
	  mask = build_int_cst (TREE_TYPE (masktype), -1);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = -1;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
	  mask = build_real (TREE_TYPE (masktype), r);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else
	gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
	merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = 0;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
	  merge = build_real (TREE_TYPE (rettype), r);
	}
      else
	gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gather_off, stmt, NULL);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_vect_var (idxtype, vect_simple_var, NULL);
	      var = make_ssa_name (var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
			  == TYPE_VECTOR_SUBPARTS (rettype));
	      var = vect_get_new_vect_var (rettype, vect_simple_var, NULL);
	      op = make_ssa_name (var, new_stmt);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
	first_dr = STMT_VINFO_DATA_REF
	    (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
	first_dr = dr;

      stride_base
	= fold_build_pointer_plus
	    (DR_BASE_ADDRESS (first_dr),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (DR_OFFSET (first_dr)),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
	     ...  */

      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
			    build_int_cst (TREE_TYPE (stride_step), vf));
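
      /* For example (illustrative numbers only): with a vectorization
	 factor of 4 and a byte step of 12 (a stride of 3 over 4-byte
	 elements), the induction variable created below advances by
	 4 * 12 == 48 bytes per vectorized iteration.  */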
      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo, NULL));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
					  &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)),
				 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
	{
	  nloads = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (TREE_TYPE (vectype), group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  if (slp_perm)
	    dr_chain.create (ncopies);
	}
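
      /* E.g. (numbers for illustration only): for an SLP group of size 2
	 with nunits == 8, NLOADS is 4 and LTYPE a 2-element vector type,
	 so each vector statement built below is assembled from four
	 2-element loads.  */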
      for (j = 0; j < ncopies; j++)
	{
	  tree vec_inv;

	  if (nloads > 1)
	    {
	      vec_alloc (v, nloads);
	      for (i = 0; i < nloads; i++)
		{
		  tree newref, newoff;

		  newref = build2 (MEM_REF, ltype, running_off, alias_off);
		  newref = force_gimple_operand_gsi (gsi, newref, true,
						     NULL_TREE, true,
						     GSI_SAME_STMT);
		  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
		  newoff = copy_ssa_name (running_off);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		}

	      vec_inv = build_constructor (vectype, v);
	      new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (new_temp);
	    }
	  else
	    {
	      new_stmt = gimple_build_assign (make_ssa_name (ltype),
					      build2 (MEM_REF, ltype,
						      running_off, alias_off));
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      tree newoff = copy_ssa_name (running_off);
	      gimple incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						 running_off, stride_step);
	      vect_finish_stmt_generation (stmt, incr, gsi);

	      running_off = newoff;
	    }

	  if (slp)
	    {
	      SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      if (slp_perm)
		dr_chain.quick_push (gimple_assign_lhs (new_stmt));
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      if (slp_perm)
	vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
				      slp_node_instance, false);
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
	 without permutation.  */
      if (slp
	  && !SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
	  && first_stmt != SLP_TREE_SCALAR_STMTS (slp_node)[0])
	first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ???  But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ???  With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */
	  && !slp)
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
	{
	  grouped_load = false;
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    vec_num = (group_size * vf + nunits - 1) / nunits;
	  else
	    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_gap_adj = vf * group_size - nunits * vec_num;
	}
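
      /* A worked example (numbers purely illustrative): for
	 group_size == 3, vf == 4 and nunits == 8, a permuted SLP load
	 needs vec_num == (3*4 + 8 - 1) / 8 == 2 vectors, i.e. 16 lanes
	 for the 12 elements actually used, so
	 group_gap_adj == 12 - 16 == -4 and the data-ref pointer is later
	 bumped back by those 4 excess elements.  */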
      else
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
				RELATED_STMT	VEC_STMT
	S1:	x = memref	-		-
	S2:	z = x + 1	-		-

     step 1: vectorize stmt S1:
	We first create the vector stmt VS1_0, and, as usual, record a
	pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
	Next, we create the vector stmt VS1_1, and record a pointer to
	it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
	Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
	stmts and pointers:
				RELATED_STMT	VEC_STMT
	VS1_0:	vx0 = memref0	VS1_1		-
	VS1_1:	vx1 = memref1	VS1_2		-
	VS1_2:	vx2 = memref2	VS1_3		-
	VS1_3:	vx3 = memref3	-		-
	S1:	x = load	-		VS1_0
	S2:	z = x + 1	-		-

     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information we recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

	S1:	x2 = &base + 2
	S2:	x0 = &base
	S3:	x1 = &base + 1
	S4:	x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

	VS1: vx0 = &base
	VS2: vx1 = &base + vec_size*1
	VS3: vx3 = &base + vec_size*2
	VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:
       p = initial_addr;
       indx = 0;
       loop {
	 p = p + indx * vectype_size;
	 vec_dest = *(p);
	 indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:
	 msq_init = *(floor(p1))
	 p2 = initial_addr + VS - 1;
	 realignment_token = call target_builtin;
	 indx = 0;
	 loop {
	   p2 = p2 + indx * vectype_size
	   lsq = *(floor(p2))
	   vec_dest = realign_load (msq, lsq, realignment_token)
	   indx = indx + 1;
	   msq = lsq;
	 }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
	  % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
				    alignment_support_scheme, NULL_TREE,
				    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
	{
	  phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
	  byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
				    size_one_node);
	}
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
	{
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr)))
	      && (alignment_support_scheme == dr_aligned
		  || alignment_support_scheme == dr_unaligned_supported))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p,
					  byte_offset);
	}
      else if (dataref_offset)
	dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
					  TYPE_SIZE_UNIT (aggr_type));
      else
	dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
				       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
	dr_chain.create (vec_num);
      if (load_lanes_p)
	{
	  tree vec_array;

	  vec_array = create_vector_array (vectype, vec_num);

	  /* Emit:
	       VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
	  gimple_call_set_lhs (new_stmt, vec_array);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  /* Extract each vector into an SSA_NAME.  */
	  for (i = 0; i < vec_num; i++)
	    {
	      new_temp = read_vector_array (stmt, gsi, scalar_dest,
					    vec_array, i);
	      dr_chain.quick_push (new_temp);
	    }

	  /* Record the mapping between SSA_NAMEs and statements.  */
	  vect_record_grouped_load_vectors (stmt, dr_chain);
	}
      else
	{
	  for (i = 0; i < vec_num; i++)
	    {
	      if (i > 0)
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      /* 2. Create the vector-load in the loop.  */
	      switch (alignment_support_scheme)
		{
		case dr_aligned:
		case dr_unaligned_supported:
		  {
		    unsigned int align, misalign;

		    data_ref
		      = fold_build2 (MEM_REF, vectype, dataref_ptr,
				     dataref_offset
				     ? dataref_offset
				     : build_int_cst (reference_alias_ptr_type
						      (DR_REF (first_dr)), 0));
		    align = TYPE_ALIGN_UNIT (vectype);
		    if (alignment_support_scheme == dr_aligned)
		      {
			gcc_assert (aligned_access_p (first_dr));
			misalign = 0;
		      }
		    else if (DR_MISALIGNMENT (first_dr) == -1)
		      {
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						TYPE_ALIGN (elem_type));
			align = TYPE_ALIGN_UNIT (elem_type);
			misalign = 0;
		      }
		    else
		      {
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						TYPE_ALIGN (elem_type));
			misalign = DR_MISALIGNMENT (first_dr);
		      }
		    if (dataref_offset == NULL_TREE
			&& TREE_CODE (dataref_ptr) == SSA_NAME)
		      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
					      align, misalign);
		    break;
		  }
		case dr_explicit_realign:
		  {
		    tree ptr, bump;

		    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

		    if (compute_in_loop)
		      msq = vect_setup_realignment (first_stmt, gsi,
						    &realignment_token,
						    dr_explicit_realign,
						    dataref_ptr, NULL);

		    if (TREE_CODE (dataref_ptr) == SSA_NAME)
		      ptr = copy_ssa_name (dataref_ptr);
		    else
		      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
		    new_stmt = gimple_build_assign
				 (ptr, BIT_AND_EXPR, dataref_ptr,
				  build_int_cst
				  (TREE_TYPE (dataref_ptr),
				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    vec_dest = vect_create_destination_var (scalar_dest,
							    vectype);
		    new_stmt = gimple_build_assign (vec_dest, data_ref);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_assign_set_lhs (new_stmt, new_temp);
		    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    msq = new_temp;

		    bump = size_binop (MULT_EXPR, vs,
				       TYPE_SIZE_UNIT (elem_type));
		    bump = size_binop (MINUS_EXPR, bump, size_one_node);
		    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
		    new_stmt = gimple_build_assign
				 (NULL_TREE, BIT_AND_EXPR, ptr,
				  build_int_cst
				  (TREE_TYPE (ptr),
				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
		    ptr = copy_ssa_name (ptr, new_stmt);
		    gimple_assign_set_lhs (new_stmt, ptr);
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    break;
		  }
		case dr_explicit_realign_optimized:
		  if (TREE_CODE (dataref_ptr) == SSA_NAME)
		    new_temp = copy_ssa_name (dataref_ptr);
		  else
		    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
		  new_stmt = gimple_build_assign
			       (new_temp, BIT_AND_EXPR, dataref_ptr,
				build_int_cst
				  (TREE_TYPE (dataref_ptr),
				   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  data_ref
		    = build2 (MEM_REF, vectype, new_temp,
			      build_int_cst (reference_alias_ptr_type
					       (DR_REF (first_dr)), 0));
		  break;
		default:
		  gcc_unreachable ();
		}
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      new_stmt = gimple_build_assign (vec_dest, data_ref);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      /* 3. Handle explicit realignment if necessary/supported.
		 Create in loop:
		   vec_dest = realign_load (msq, lsq, realignment_token)  */
	      if (alignment_support_scheme == dr_explicit_realign_optimized
		  || alignment_support_scheme == dr_explicit_realign)
		{
		  lsq = gimple_assign_lhs (new_stmt);
		  if (!realignment_token)
		    realignment_token = dataref_ptr;
		  vec_dest = vect_create_destination_var (scalar_dest,
							  vectype);
		  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
						  msq, lsq,
						  realignment_token);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);

		  if (alignment_support_scheme == dr_explicit_realign_optimized)
		    {
		      gcc_assert (phi);
		      if (i == vec_num - 1 && j == ncopies - 1)
			add_phi_arg (phi, lsq,
				     loop_latch_edge (containing_loop),
				     UNKNOWN_LOCATION);
		      msq = lsq;
		    }
		}
	      /* 4. Handle invariant-load.  */
	      if (inv_p && !bb_vinfo)
		{
		  gcc_assert (!grouped_load);
		  /* If we have versioned for aliasing or the loop doesn't
		     have any data dependencies that would preclude this,
		     then we are sure this is a loop invariant load and
		     thus we can insert it on the preheader edge.  */
		  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
		      && !nested_in_vect_loop
		      && hoist_defs_of_uses (stmt, loop))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "hoisting out of the vectorized "
					   "loop: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
			}
		      tree tem = copy_ssa_name (scalar_dest);
		      gsi_insert_on_edge_immediate
			(loop_preheader_edge (loop),
			 gimple_build_assign (tem,
					      unshare_expr
					        (gimple_assign_rhs1 (stmt))));
		      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
		    }
		  else
		    {
		      gimple_stmt_iterator gsi2 = *gsi;
		      gsi_next (&gsi2);
		      new_temp = vect_init_vector (stmt, scalar_dest,
						   vectype, &gsi2);
		    }
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, loop_vinfo,
							 bb_vinfo));
		}

	      if (negative)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  new_temp = permute_vec_elements (new_temp, new_temp,
						   perm_mask, stmt, gsi);
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		}
	      /* Collect vector loads and later create their permutation in
		 vect_transform_grouped_load ().  */
	      if (grouped_load || slp_perm)
		dr_chain.quick_push (new_temp);

	      /* Store vector loads in the corresponding SLP_NODE.  */
	      if (slp && !slp_perm)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	  /* Bump the vector pointer to account for a gap or for excess
	     elements loaded for a permuted SLP load.  */
	  if (group_gap_adj != 0)
	    {
	      bool ovf;
	      tree bump
		= wide_int_to_tree (sizetype,
				    wi::smul (TYPE_SIZE_UNIT (elem_type),
					      group_gap_adj, &ovf));
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					     stmt, bump);
	    }
	}

      if (slp && !slp_perm)
	continue;

      if (slp_perm)
	{
	  if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
					     slp_node_instance, false))
	    {
	      dr_chain.release ();
	      return false;
	    }
	}
      else
	{
	  if (grouped_load)
	    {
	      if (!load_lanes_p)
		vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
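
/* For instance (a hypothetical example constructed for this comment):
   given a loop body containing

     c_5 = a_3 < b_4 ? x_1 : y_2;

   the condition a_3 < b_4 is "simple" when a_3 and b_4 are each a
   constant, an invariant, or an SSA name defined inside the loop, and
   *COMP_VECTYPE is then derived from their vector types.  */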
static bool
vect_is_simple_cond (tree cond, gimple stmt, loop_vec_info loop_vinfo,
		     bb_vec_info bb_vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  tree def;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use_1 (lhs, stmt, loop_vinfo, bb_vinfo,
				 &lhs_def_stmt, &def, &dt, &vectype1))
	return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
	   && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use_1 (rhs, stmt, loop_vinfo, bb_vinfo,
				 &rhs_def_stmt, &def, &dt, &vectype2))
	return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
	   && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
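
/* As an illustration (example constructed for this comment), a scalar
   loop such as

     for (i = 0; i < n; i++)
       r[i] = a[i] < b[i] ? x[i] : y[i];

   is transformed so that each generated vector statement computes
   VEC_COND_EXPR <va < vb, vx, vy> on whole vectors of elements.  */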
bool
vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
			gimple *vec_stmt, tree reduc_def, int reduc_index,
			slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree def;
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  /* FORNOW: not yet supported.  */
  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");
      return false;
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt, loop_vinfo, bb_vinfo,
			    &comp_vectype)
      || !comp_vectype)
    return false;

  if (TREE_CODE (then_clause) == SSA_NAME)
    {
      gimple then_def_stmt = SSA_NAME_DEF_STMT (then_clause);
      if (!vect_is_simple_use (then_clause, stmt, loop_vinfo, bb_vinfo,
			       &then_def_stmt, &def, &dt))
	return false;
    }
  else if (TREE_CODE (then_clause) != INTEGER_CST
	   && TREE_CODE (then_clause) != REAL_CST
	   && TREE_CODE (then_clause) != FIXED_CST)
    return false;

  if (TREE_CODE (else_clause) == SSA_NAME)
    {
      gimple else_def_stmt = SSA_NAME_DEF_STMT (else_clause);
      if (!vect_is_simple_use (else_clause, stmt, loop_vinfo, bb_vinfo,
			       &else_def_stmt, &def, &dt))
	return false;
    }
  else if (TREE_CODE (else_clause) != INTEGER_CST
	   && TREE_CODE (else_clause) != REAL_CST
	   && TREE_CODE (else_clause) != FIXED_CST)
    return false;

  unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (vectype)));
  /* The result of a vector comparison should be signed type.  */
  tree cmp_type = build_nonstandard_integer_type (prec, 0);
  vec_cmp_type = get_same_sized_vectype (cmp_type, vectype);
  if (vec_cmp_type == NULL_TREE)
    return false;
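
  /* E.g. (an illustrative, target-independent example): for a vectype of
     four floats the element precision is 32, so CMP_TYPE is a signed
     32-bit integer type and VEC_CMP_TYPE the same-sized 4-element integer
     vector type that holds the comparison result masks.  */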
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      ops.safe_push (TREE_OPERAND (cond_expr, 0));
	      ops.safe_push (TREE_OPERAND (cond_expr, 1));
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();

	      ops.release ();
	      vec_defs.release ();
	    }
	  else
	    {
	      gimple gtemp;
	      vec_cond_lhs
		= vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
						stmt, NULL);
	      vect_is_simple_use (TREE_OPERAND (cond_expr, 0), stmt,
				  loop_vinfo, NULL, &gtemp, &def, &dts[0]);

	      vec_cond_rhs
		= vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
						stmt, NULL);
	      vect_is_simple_use (TREE_OPERAND (cond_expr, 1), stmt,
				  loop_vinfo, NULL, &gtemp, &def, &dts[1]);
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause
		    = vect_get_vec_def_for_operand (then_clause, stmt, NULL);
		  vect_is_simple_use (then_clause, stmt, loop_vinfo,
				      NULL, &gtemp, &def, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause
		    = vect_get_vec_def_for_operand (else_clause, stmt, NULL);
		  vect_is_simple_use (else_clause, stmt, loop_vinfo,
				      NULL, &gtemp, &def, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs = vect_get_vec_def_for_stmt_copy (dts[0],
							 vec_oprnds0.pop ());
	  vec_cond_rhs = vect_get_vec_def_for_stmt_copy (dts[1],
							 vec_oprnds1.pop ());
	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_cond_rhs = vec_oprnds1[i];
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
				vec_cond_lhs, vec_cond_rhs);
	  vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
				  vec_compare, vec_then_clause,
				  vec_else_clause);

	  new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
	return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node))
		return false;
	    }
	}
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "get vectype for scalar type: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
	  dump_printf (MSG_NOTE, "\n");
	}

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "not SLPed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  return false;
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
	  dump_printf (MSG_NOTE, "\n");
	}

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }
  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	      || vectorizable_conversion (stmt, NULL, NULL, node)
	      || vectorizable_shift (stmt, NULL, NULL, node)
	      || vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_call (stmt, NULL, NULL, node)
	      || vectorizable_store (stmt, NULL, NULL, node)
	      || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }
  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
					   vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	    {
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	    }
	}
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple first_stmt)
{
  gimple next = first_stmt;
  gimple tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo,
		   bb_vec_info bb_vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  STMT_VINFO_LOOP_VINFO (res) = loop_vinfo;
  STMT_VINFO_BB_VINFO (res) = bb_vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  vec_void_p info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT ((stmt_vec_info) info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   LOOP_VINFO - the vect info of the loop that is being vectorized.
   BB_VINFO - the vect info of the basic block that is being vectorized.
   OPERAND - operand of STMT in the loop or bb.
   DEF - the defining stmt in case OPERAND is an SSA_NAME.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
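
/* For example (hypothetical operands, for illustration only): a literal
   such as 5 classifies as vect_constant_def, a function parameter used
   inside the loop as vect_external_def, and an SSA name defined by a
   statement in the loop body as vect_internal_def; an SSA name carried
   around the loop by a PHI node classifies as a reduction, induction or
   nested-cycle def instead.  */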
bool
vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
		    bb_vec_info bb_vinfo, gimple *def_stmt,
		    tree *def, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *def = NULL_TREE;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *def = operand;
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  basic_block bb = gimple_bb (*def_stmt);
  if ((loop_vinfo && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), bb))
      || (bb_vinfo
	  && (bb != BB_VINFO_BB (bb_vinfo)
	      || gimple_code (*def_stmt) == GIMPLE_PHI)))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      if (bb_vinfo && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
	*dt = vect_external_def;
      else
	*dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type
      || (stmt
	  && *dt == vect_double_reduction_def
	  && gimple_code (stmt) != GIMPLE_PHI))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
      *def = gimple_phi_result (*def_stmt);
      break;

    case GIMPLE_ASSIGN:
      *def = gimple_assign_lhs (*def_stmt);
      break;

    case GIMPLE_CALL:
      *def = gimple_call_lhs (*def_stmt);
      if (*def != NULL)
	break;
      /* FALLTHRU */
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use_1.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo,
		      bb_vec_info bb_vinfo, gimple *def_stmt,
		      tree *def, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, stmt, loop_vinfo, bb_vinfo, def_stmt,
			   def, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
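
/* As a concrete illustration (types chosen for this comment only):
   widening a vector of chars to ints goes char->short->int, so on
   success *MULTI_STEP_CVT would be 1, INTERM_TYPES would hold the short
   vector type, and for a plain conversion CODE1/CODE2 would be the
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR pair applied at each step.  */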
bool
supportable_widening_operation (enum tree_code code, gimple stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have the
	     same operation.  One such an example is s += a * b, where elements
	     in a and b cannot be reordered.  Here we check if the vector defined
	     by STMT is only directly used in the reduction statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
8388 if (BYTES_BIG_ENDIAN
&& c1
!= VEC_WIDEN_MULT_EVEN_EXPR
)
8391 if (code
== FIX_TRUNC_EXPR
)
8393 /* The signedness is determined from output operand. */
8394 optab1
= optab_for_tree_code (c1
, vectype_out
, optab_default
);
8395 optab2
= optab_for_tree_code (c2
, vectype_out
, optab_default
);
8399 optab1
= optab_for_tree_code (c1
, vectype
, optab_default
);
8400 optab2
= optab_for_tree_code (c2
, vectype
, optab_default
);
8403 if (!optab1
|| !optab2
)
8406 vec_mode
= TYPE_MODE (vectype
);
8407 if ((icode1
= optab_handler (optab1
, vec_mode
)) == CODE_FOR_nothing
8408 || (icode2
= optab_handler (optab2
, vec_mode
)) == CODE_FOR_nothing
)
8414 if (insn_data
[icode1
].operand
[0].mode
== TYPE_MODE (wide_vectype
)
8415 && insn_data
[icode2
].operand
[0].mode
== TYPE_MODE (wide_vectype
))
8418 /* Check if it's a multi-step conversion that can be done using intermediate
8421 prev_type
= vectype
;
8422 prev_mode
= vec_mode
;
8424 if (!CONVERT_EXPR_CODE_P (code
))
8427 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
8428 intermediate steps in promotion sequence. We try
8429 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
8431 interm_types
->create (MAX_INTERM_CVT_STEPS
);
8432 for (i
= 0; i
< MAX_INTERM_CVT_STEPS
; i
++)
8434 intermediate_mode
= insn_data
[icode1
].operand
[0].mode
;
8436 = lang_hooks
.types
.type_for_mode (intermediate_mode
,
8437 TYPE_UNSIGNED (prev_type
));
8438 optab3
= optab_for_tree_code (c1
, intermediate_type
, optab_default
);
8439 optab4
= optab_for_tree_code (c2
, intermediate_type
, optab_default
);
8441 if (!optab3
|| !optab4
8442 || (icode1
= optab_handler (optab1
, prev_mode
)) == CODE_FOR_nothing
8443 || insn_data
[icode1
].operand
[0].mode
!= intermediate_mode
8444 || (icode2
= optab_handler (optab2
, prev_mode
)) == CODE_FOR_nothing
8445 || insn_data
[icode2
].operand
[0].mode
!= intermediate_mode
8446 || ((icode1
= optab_handler (optab3
, intermediate_mode
))
8447 == CODE_FOR_nothing
)
8448 || ((icode2
= optab_handler (optab4
, intermediate_mode
))
8449 == CODE_FOR_nothing
))
8452 interm_types
->quick_push (intermediate_type
);
8453 (*multi_step_cvt
)++;
8455 if (insn_data
[icode1
].operand
[0].mode
== TYPE_MODE (wide_vectype
)
8456 && insn_data
[icode2
].operand
[0].mode
== TYPE_MODE (wide_vectype
))
8459 prev_type
= intermediate_type
;
8460 prev_mode
= intermediate_mode
;
8463 interm_types
->release ();
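/* Usage sketch (hypothetical caller; the variable names below are
   illustrative only).  This is roughly how callers such as
   vectorizable_conversion query the function for a widening NOP
   conversion:

     enum tree_code code1, code2;
     int multi_step_cvt;
     auto_vec<tree> interm_types;

     if (supportable_widening_operation (NOP_EXPR, stmt, vectype_out,
                                         vectype_in, &code1, &code2,
                                         &multi_step_cvt, &interm_types))
       {
         ... the conversion is supported: CODE1/CODE2 name the LO/HI
         (or EVEN/ODD) vector codes, and when MULTI_STEP_CVT is
         non-zero, INTERM_TYPES lists the intermediate vector types in
         conversion order.  ...
       }  */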
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */
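/* As a concrete illustration, assuming a target with 128-bit vectors
   that can only pack one step at a time: narrowing V4SI operands down
   to V16QI results is the int->short->char case above - on success
   *CODE1 is VEC_PACK_TRUNC_EXPR, *MULTI_STEP_CVT is 1 and INTERM_TYPES
   holds the single intermediate vector type V8HI.  */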
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using
     intermediate types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the demotion sequence.  We try at most
     MAX_INTERM_CVT_STEPS steps to get to NARROW_VECTYPE, and fail if we
     do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
        = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode))
              == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
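  /* Reaching this point means no chain of at most MAX_INTERM_CVT_STEPS
     intermediate packing steps reached NARROW_VECTYPE, so the operation
     is reported as unsupported.  */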