/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"              /* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
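
/* For illustration (these statements are not from any particular
   testcase): in a loop over i, "x = a[i]" is classified VLS_LOAD,
   "a[i] = x" with a loop-varying x is VLS_STORE, and "a[i] = 5" is
   VLS_STORE_INVARIANT, which can store a single splatted vector.  */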
/* Return the vectorized type for the given statement.  */

static tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */
bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
                                stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
                                misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
        (builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
                          count, kind, stmt_info, misalign, where);
}
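
/* Usage sketch (hypothetical numbers; real costs come from the target's
   cost model): with count == 2, kind == vector_stmt and a target whose
   builtin_vectorization_cost returns 1 for vector_stmt, the call

     record_stmt_cost (body_cost_vec, 2, vector_stmt, stmt_info, 0,
                       vect_body);

   pushes one stmt_info_for_cost entry with count 2 and returns the
   preliminary estimate 1 * 2 == 2; passing a NULL cost vector instead
   routes the cost directly to add_stmt_cost.  */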
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
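
/* For example (an illustrative sketch; the temporary names are made up),
   for N == 2 these two helpers generate GIMPLE along the lines of:

     vect_x.7 = vect_array[2];      <-- read_vector_array
     vect_array[2] = vect_y.8;      <-- write_vector_array

   as used when expanding LOAD_LANES/STORE_LANES style accesses.  */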
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "last stmt in pattern. don't mark"
                         " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }
  return true;
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
        && !gimple_clobber_p (stmt))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)   */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
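
/* Two illustrative cases (not from any particular testcase):

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;    <-- has a vdef: *relevant = vect_used_in_scope
         s += c[i];          <-- s is used after the loop: *live_p = true
       }

   A stmt that is live but not otherwise relevant (e.g. an invariant
   computation only used by a loop-exit phi) gets vect_used_only_live.  */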
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- array_ref = var
     -2- var = array_ref
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_MASK_STORE:
            operand = gimple_call_arg (stmt, 3);
            if (operand == use)
              return true;
            /* FALLTHRU */
          case IFN_MASK_LOAD:
            operand = gimple_call_arg (stmt, 2);
            if (operand == use)
              return true;
            break;
          default:
            break;
          }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
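
/* For example, in "x_1 = a[i_2]" the use of i_2 only appears inside the
   array reference, so exist_non_indexing_operands_for_use_p (i_2, stmt)
   is false, whereas in the copy "x_1 = y_3" the use of y_3 is real and
   the function returns true.  (Illustrative SSA names.)  */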
/*
   Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
             enum vect_relevant relevant, vec<gimple *> *worklist,
             bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
        case vect_used_only_live:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
           && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
           && ! STMT_VINFO_LIVE_P (stmt_vinfo)
           && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
               == use))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}
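
/* An illustrative outer-loop example for case 3a (made-up statements):

     outer: for (i ...)
              d_1 = i * 4;         <-- def_stmt in the outer loop
              inner: for (j ...)
                       use (d_1);  <-- stmt in the inner loop

   When the nest is vectorized, RELEVANT is translated across the nesting
   as in the switches above, e.g. vect_used_in_outer on the use becomes
   vect_used_in_scope for the definition d_1.  */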
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized. For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */
bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant according to the relevance property
         of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
         propagated as is to the DEF_STMTs of its USEs.

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the relevance to vect_used_by_reduction.
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
        {
          case vect_reduction_def:
            gcc_assert (relevant != vect_unused_in_scope);
            if (relevant != vect_unused_in_scope
                && relevant != vect_used_in_scope
                && relevant != vect_used_by_reduction
                && relevant != vect_used_only_live)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of reduction.\n");
                return false;
              }
            break;

          case vect_nested_cycle:
            if (relevant != vect_unused_in_scope
                && relevant != vect_used_in_outer_by_reduction
                && relevant != vect_used_in_outer)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of nested cycle.\n");
                return false;
              }
            break;

          case vect_double_reduction_def:
            if (relevant != vect_unused_in_scope
                && relevant != vect_used_by_reduction
                && relevant != vect_used_only_live)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of double reduction.\n");
                return false;
              }
            break;

          default:
            break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
              tree op = gimple_assign_rhs1 (stmt);

              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
                                    relevant, &worklist, false)
                      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
                                       relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (stmt); i++)
                {
                  op = gimple_op (stmt, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt, op, loop_vinfo, relevant,
                                       &worklist, false))
                    return false;
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, relevant,
                                    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt, op, loop_vinfo, relevant,
                              &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          gather_scatter_info gs_info;
          if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
            gcc_unreachable ();
          if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
                            &worklist, true))
            return false;
        }
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        int ndts,
                        stmt_vector_for_cost *prologue_cost_vec,
                        stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                         stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
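
/* Worked example (costs are target-specific; numbers here assume unit
   costs from the default builtin_vectorization_cost): for ncopies == 4
   and one vect_constant_def operand, the prologue gets one scalar_to_vec
   (broadcast) and the body gets four vector_stmt costs, i.e.
   inside_cost == 4 and prologue_cost == 1.  */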
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
        (i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
                                    vec_promote_demote, stmt_info, 0,
                                    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
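
/* For instance, with unit target costs: a single-step demotion
   (PWR == 0) costs vect_pow2 (0) == 1 vec_promote_demote operation,
   while a single-step promotion costs vect_pow2 (1) == 2, since both
   the low and high halves of the input vector must be widened.  Each
   extra step doubles these counts.  */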
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       vect_memory_access_type memory_access_type,
                       enum vect_def_type dt, slp_tree slp_node,
                       stmt_vector_for_cost *prologue_cost_vec,
                       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses a high and low interleave or shuffle operations for each
         needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    /* N scalar stores plus extracting the elements.  */
    inside_cost += record_stmt_cost (body_cost_vec,
                                     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                     scalar_store, stmt_info, 0, vect_body);
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec,
                                     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
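
/* For example (unit costs assumed): a grouped store with group_size == 4,
   ncopies == 1 and VMAT_CONTIGUOUS_PERMUTE adds
   ceil_log2 (4) * 4 == 8 vec_perm operations for the interleaving, on
   top of the vector_store cost recorded by vect_get_store_cost.  */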
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost,
                     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vector_store, stmt_info, 0,
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
        break;
      }

    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_store, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
                      vect_memory_access_type memory_access_type,
                      slp_tree slp_node,
                      stmt_vector_for_cost *prologue_cost_vec,
                      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses an even and odd extract operations or shuffle operations
         for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
                        &inside_cost, &prologue_cost,
                        prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
                                     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *prologue_cost,
                    stmt_vector_for_cost *prologue_cost_vec,
                    stmt_vector_for_cost *body_cost_vec,
                    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_load, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
                                          vector_load, stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vec_perm, stmt_info, 0, vect_body);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           prologue costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
                                            stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide grouped
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost && record_prologue_costs)
          {
            *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
                                                vector_stmt, stmt_info,
                                                0, vect_prologue);
            if (targetm.vectorize.builtin_mask_for_load)
              *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
                                                  vector_stmt, stmt_info,
                                                  0, vect_prologue);
          }

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");

        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
          basic_block new_bb;
          edge pe;

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
        }
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push something to an SSA name with initial
     'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
        {
          /* Scalar boolean value should be transformed into
             all zeros or all ones value before building a vector.  */
          if (VECTOR_BOOLEAN_TYPE_P (type))
            {
              tree true_val = build_all_ones_cst (TREE_TYPE (type));
              tree false_val = build_zero_cst (TREE_TYPE (type));

              if (CONSTANT_CLASS_P (val))
                val = integer_zerop (val) ? false_val : true_val;
              else
                {
                  new_temp = make_ssa_name (TREE_TYPE (type));
                  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
                                                   val, true_val, false_val);
                  vect_init_vector_1 (stmt, init_stmt, gsi);
                  val = new_temp;
                }
            }
          else if (CONSTANT_CLASS_P (val))
            val = fold_convert (TREE_TYPE (type), val);
          else
            {
              new_temp = make_ssa_name (TREE_TYPE (type));
              if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
                init_stmt = gimple_build_assign (new_temp,
                                                 fold_build1 (VIEW_CONVERT_EXPR,
                                                              TREE_TYPE (type),
                                                              val));
              else
                init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
              vect_init_vector_1 (stmt, init_stmt, gsi);
              val = new_temp;
            }
        }
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
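
/* For example (illustrative SSA names): calling vect_init_vector with
   VAL == 5 and a V4SI TYPE and a NULL GSI emits in the preheader
   something like

     cst_1 = { 5, 5, 5, 5 };

   and returns cst_1, after converting 5 to the vector element type if
   needed.  */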
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);

        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        /* Get vectorized pattern statement.  */
        if (!vec_stmt
            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
            && !STMT_VINFO_RELEVANT (def_stmt_info))
          vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
                       STMT_VINFO_RELATED_STMT (def_stmt_info)));
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
        vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
               && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
        vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */
tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 vec<tree> *vec_oprnds0,
                                 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
                   vec<tree> *vec_oprnds0,
                   vec<tree> *vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
        *vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
        {
          vec_oprnds1->create (1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
          vec_oprnds1->quick_push (vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
        {
          tree vdef = gimple_vdef (at_stmt);
          gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
          /* If we have an SSA vuse and insert a store, update virtual
             SSA form to avoid triggering the renamer.  Do so only
             if we can easily see all uses - which is what almost always
             happens with the way vectorized stmts are inserted.  */
          if ((vdef && TREE_CODE (vdef) == SSA_NAME)
              && ((is_gimple_assign (vec_stmt)
                   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
                  || (is_gimple_call (vec_stmt)
                      && !(gimple_call_flags (vec_stmt)
                           & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
            {
              tree new_vdef = copy_ssa_name (vuse, vec_stmt);
              gimple_set_vdef (vec_stmt, new_vdef);
              SET_USE (gimple_vuse_op (at_stmt), new_vdef);
            }
        }
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
                                tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
        {
          tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
          tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
          if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
                                              OPTIMIZE_FOR_SPEED))
            return ifn;
        }
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
                                  gimple_stmt_iterator *);
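
/* For example, a call to sqrt in the loop body arrives here as CFN_SQRT;
   if the target provides a direct optab for IFN_SQRT on the chosen
   vector types (say vectype_out == vectype_in == V2DF, an illustrative
   mode), IFN_SQRT is returned and the call is vectorized as an internal
   function call.  */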
/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
                               size_zero_node);
}
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
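
/* E.g. for V4SI the generated selector is { 3, 2, 1, 0 }, the permute
   needed by VMAT_CONTIGUOUS_REVERSE accesses; NULL is returned if the
   target cannot permute that mode.  */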
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
                           vec_load_store_type vls_type,
                           vect_memory_access_type *memory_access_type)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
                           && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
        {
          /* Try to use consecutive accesses of GROUP_SIZE elements,
             separated by the stride, until we have a complete vector.
             Fall back to scalar accesses if that isn't possible.  */
          if (nunits % group_size == 0)
            *memory_access_type = VMAT_STRIDED_SLP;
          else
            *memory_access_type = VMAT_ELEMENTWISE;
        }
      else
        {
          overrun_p = loop_vinfo && gap != 0;
          if (overrun_p && vls_type != VLS_LOAD)
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "Grouped store with gaps requires"
                               " non-consecutive accesses\n");
              return false;
            }
          /* If the access is aligned an overrun is fine.  */
          if (overrun_p
              && aligned_access_p
                   (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))))
            overrun_p = false;
          if (overrun_p && !can_overrun_p)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Peeling for outer loop is not supported\n");
              return false;
            }
          *memory_access_type = VMAT_CONTIGUOUS;
        }
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
         but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
         would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* If the access is aligned an overrun is fine, but only if the
         overrun is not inside an unused vector (if the gap is as large
         or larger than a vector).  */
      if (would_overrun_p
          && gap < nunits
          && aligned_access_p
               (STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt))))
        would_overrun_p = false;
      if (!STMT_VINFO_STRIDED_P (stmt_info)
          && (can_overrun_p || !would_overrun_p)
          && compare_step_with_zero (stmt) > 0)
        {
          /* First try using LOAD/STORE_LANES.  */
          if (vls_type == VLS_LOAD
              ? vect_load_lanes_supported (vectype, group_size)
              : vect_store_lanes_supported (vectype, group_size))
            {
              *memory_access_type = VMAT_LOAD_STORE_LANES;
              overrun_p = would_overrun_p;
            }

          /* If that fails, try using permuting loads.  */
          if (*memory_access_type == VMAT_ELEMENTWISE
              && (vls_type == VLS_LOAD
                  ? vect_grouped_load_supported (vectype, single_element_p,
                                                 group_size)
                  : vect_grouped_store_supported (vectype, group_size)))
            {
              *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
              overrun_p = would_overrun_p;
            }
        }
    }

  if (vls_type != VLS_LOAD && first_stmt == stmt)
    {
      /* STMT is the leader of the group.  Check the operands of all the
         stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt)
        {
          gcc_assert (gimple_assign_single_p (next_stmt));
          tree op = gimple_assign_rhs1 (next_stmt);
          gimple *def_stmt;
          enum vect_def_type dt;
          if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "use not simple.\n");
              return false;
            }
          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
        }
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Data access with gaps requires scalar "
                         "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
                              vec_load_store_type vls_type,
                              unsigned int ncopies)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "negative step with invariant source;"
                         " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
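
/* A sketch of the typical case (illustrative source):

     for (i = n - 1; i >= 0; i--)
       a[i] = b[i];

   Here the DR step is negative, and with suitable alignment and a
   reverse permute mask both accesses can use VMAT_CONTIGUOUS_REVERSE:
   access a vector below the current address and reverse its elements.  */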

/* Analyze load or store statement STMT of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (gimple *stmt, tree vectype, bool slp,
		     vec_load_store_type vls_type, unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      gimple *def_stmt;
      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
				      memory_access_type))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      *memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }
  return true;
}

/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be
   vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  gather_scatter_info gs_info;
  vec_load_store_type vls_type;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gcc_assert (ncopies >= 1);

  mask = gimple_call_arg (stmt, 2);

  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    return false;

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
      || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
	return false;
      if (dt == vect_constant_def || dt == vect_external_def)
	vls_type = VLS_STORE_INVARIANT;
      else
	vls_type = VLS_STORE;
    }
  else
    vls_type = VLS_LOAD;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree masktype
	= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "masked gather with integer mask not supported.");
	  return false;
	}
    }
  else if (memory_access_type != VMAT_CONTIGUOUS)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported access type for masked %s.\n",
			 vls_type == VLS_LOAD ? "load" : "store");
      return false;
    }
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
	   || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
					  TYPE_MODE (mask_vectype),
					  vls_type == VLS_LOAD)
	   || (rhs_vectype
	       && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vls_type == VLS_LOAD)
	vect_model_load_cost (stmt_info, ncopies, memory_access_type,
			      NULL, NULL, NULL);
      else
	vect_model_store_cost (stmt_info, ncopies, memory_access_type,
			       dt, NULL, NULL, NULL);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  /* Transform.  */

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
      tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
      tree mask_perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype)
			   && types_compatible_p (srctype, masktype));

      if (nunits == gather_off_nunits)
	modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < gather_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel);
	}
      else if (nunits == gather_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < nunits; ++i)
	    sel[i] = i < gather_off_nunits
		     ? i : i + nunits - gather_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  for (i = 0; i < nunits; ++i)
	    sel[i] = i | gather_off_nunits;
	  mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
	}
      else
	gcc_unreachable ();

      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      scale = build_int_cst (scaletype, gs_info.scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gs_info.offset, stmt);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  if (mask_perm_mask && (j & 1))
	    mask_op = permute_vec_elements (mask_op, mask_op,
					    mask_perm_mask, stmt, gsi);
	  else
	    {
	      if (j == 0)
		vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      else
		{
		  vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
		  vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
		}

	      mask_op = vec_mask;
	      if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
		{
		  gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
			      == TYPE_VECTOR_SUBPARTS (masktype));
		  var = vect_get_new_ssa_name (masktype, vect_simple_var);
		  mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
		  new_stmt
		    = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  mask_op = var;
		}
	    }

	  new_stmt
	    = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
				 scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
			  == TYPE_VECTOR_SUBPARTS (rettype));
	      op = vect_get_new_ssa_name (rettype, vect_simple_var);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
      return true;
    }
  else if (vls_type != VLS_LOAD)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? least_bit_hwi (misalign) : align);
	  gcall *call
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  ptr, vec_mask, vec_rhs);
	  gimple_call_set_nothrow (call, true);
	  new_stmt = call;
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? least_bit_hwi (misalign) : align);
	  gcall *call
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  ptr, vec_mask);
	  gimple_call_set_lhs (call, make_ssa_name (vec_dest));
	  gimple_call_set_nothrow (call, true);
	  vect_finish_stmt_generation (stmt, call, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
	  prev_stmt_info = vinfo_for_stmt (call);
	}
    }

  if (vls_type == VLS_LOAD)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
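
/* Illustrative note (added commentary, not from the original sources):
   the conditional accesses handled above come from if-converted loops
   such as

     for (int i = 0; i < n; ++i)
       if (c[i] > 0)
	 a[i] = b[i];

   where if-conversion has already rewritten the body into
   IFN_MASK_LOAD/IFN_MASK_STORE calls whose mask argument is the
   comparison result; lanes with a zero mask bit neither fault nor
   store.  */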

/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}.  */

static bool
vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node,
		    tree vectype_in, enum vect_def_type *dt)
{
  tree op, vectype;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  unsigned ncopies, nunits;

  op = gimple_call_arg (stmt, 0);
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
  if (! char_vectype)
    return false;

  unsigned char *elts
    = XALLOCAVEC (unsigned char, TYPE_VECTOR_SUBPARTS (char_vectype));
  unsigned char *elt = elts;
  unsigned word_bytes = TYPE_VECTOR_SUBPARTS (char_vectype) / nunits;
  for (unsigned i = 0; i < nunits; ++i)
    for (unsigned j = 0; j < word_bytes; ++j)
      *elt++ = (i + 1) * word_bytes - j - 1;

  if (! can_vec_perm_p (TYPE_MODE (char_vectype), false, elts))
    return false;

  if (! vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
			 "\n");
      if (! PURE_SLP_STMT (stmt_info))
	{
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 1, vector_stmt, stmt_info, 0, vect_prologue);
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 ncopies, vec_perm, stmt_info, 0, vect_body);
	}
      return true;
    }

  tree *telts = XALLOCAVEC (tree, TYPE_VECTOR_SUBPARTS (char_vectype));
  for (unsigned i = 0; i < TYPE_VECTOR_SUBPARTS (char_vectype); ++i)
    telts[i] = build_int_cst (char_type_node, elts[i]);
  tree bswap_vconst = build_vector (char_vectype, telts);

  /* Transform.  */
  vec<tree> vec_oprnds = vNULL;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  for (unsigned j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      unsigned i;
      tree vop;
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  tree tem = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       char_vectype, vop));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tree tem2 = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
					  tem, tem, bswap_vconst);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tem = make_ssa_name (vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       vectype, tem2));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
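
/* Illustrative note (added commentary, not from the original sources):
   for a V4SI input, __builtin_bswap32 becomes a byte permute on the
   V16QI view using the selector built above:

     { 3, 2, 1, 0,  7, 6, 5, 4,  11, 10, 9, 8,  15, 14, 13, 12 }

   i.e. each 4-byte word is reversed in place, and the permuted bytes
   are then view-converted back to V4SI.  */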

/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
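
/* Illustrative note (added commentary, not from the original sources):
   a single-step narrowing here means e.g. packing two V2DI vectors into
   one V4SI with VEC_PACK_TRUNC_EXPR; a conversion that would need an
   intermediate type (multi_step_cvt != 0) is rejected and left to the
   generic conversion handling instead.  */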

/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 3;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?  */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  tree_code convert_code = ERROR_MARK;
  if (cfn != CFN_LAST
      && (modifier == NONE
	  || (modifier == NARROW
	      && simple_integer_narrowing (vectype_out, vectype_in,
					   &convert_code))))
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else if (modifier == NONE
	       && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
		   || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
		   || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
	return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
				   vectype_in, dt);
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW && ifn == IFN_LAST)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
	add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
		       vec_promote_demote, stmt_info, 0, vect_body);

      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  if (modifier == NONE || ifn != IFN_LAST)
    {
      tree prev_res = NULL_TREE;
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  if (modifier == NARROW)
		    {
		      tree half_res = make_ssa_name (vectype_in);
		      gcall *call
			= gimple_build_call_internal_vec (ifn, vargs);
		      gimple_call_set_lhs (call, half_res);
		      gimple_call_set_nothrow (call, true);
		      new_stmt = call;
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      if ((i & 1) == 0)
			{
			  prev_res = half_res;
			  continue;
			}
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, convert_code,
						      prev_res, half_res);
		    }
		  else
		    {
		      gcall *call;
		      if (ifn != IFN_LAST)
			call = gimple_build_call_internal_vec (ifn, vargs);
		      else
			call = gimple_build_call_vec (fndecl, vargs);
		      new_temp = make_ssa_name (vec_dest, call);
		      gimple_call_set_lhs (call, new_temp);
		      gimple_call_set_nothrow (call, true);
		      new_stmt = call;
		    }
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else if (modifier == NARROW)
	    {
	      tree half_res = make_ssa_name (vectype_in);
	      gcall *call = gimple_build_call_internal_vec (ifn, vargs);
	      gimple_call_set_lhs (call, half_res);
	      gimple_call_set_nothrow (call, true);
	      new_stmt = call;
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if ((j & 1) == 0)
		{
		  prev_res = half_res;
		  continue;
		}
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, convert_code,
					      prev_res, half_res);
	    }
	  else
	    {
	      gcall *call;
	      if (ifn != IFN_LAST)
		call = gimple_build_call_internal_vec (ifn, vargs);
	      else
		call = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (call, new_temp);
	      gimple_call_set_nothrow (call, true);
	      new_stmt = call;
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == (modifier == NARROW ? 1 : 0))
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else if (modifier == NARROW)
    {
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  gcall *call;
		  if (ifn != IFN_LAST)
		    call = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    call = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, call);
		  gimple_call_set_lhs (call, new_temp);
		  gimple_call_set_nothrow (call, true);
		  new_stmt = call;
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
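
/* Illustrative note (added commentary, not from the original sources):
   a call such as

     for (int i = 0; i < n; ++i)
       a[i] = sqrt (b[i]);

   may, for example, be vectorized through the IFN_SQRT internal function
   when the target implements the vector sqrt optab, and otherwise
   through a target-specific builtin returned by
   targetm.vectorize.builtin_vectorized_function.  */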

struct simd_call_arg_info
{
  tree vectype;
  tree op;
  HOST_WIDE_INT linear_step;
  enum vect_def_type dt;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
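
/* Illustrative note (added commentary, not from the original sources):
   the pattern recognized above is roughly the GIMPLE sequence

     _1 = GOMP_SIMD_LANE (simduid.0);
     _2 = _1 * 8;
     p_3 = &base + _2;    (POINTER_PLUS_EXPR)

   where p_3 advances by a constant 8-byte step from lane to lane within
   one SIMD chunk even though it is not a simple IV of the whole loop;
   such an argument can still be passed to a linear clone parameter.  */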

/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  auto_vec<simd_call_arg_info> arginfo;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?  */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.reserve (nargs, true);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }

  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
		      (n->simdclone->args[i].orig_type,
		       TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    return false;

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  return false;
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    return false;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if ((bestn->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	    || (bestn->simdclone->args[i].arg_type
		== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  bitsize_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				bitsize_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
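
/* Illustrative note (added commentary, not from the original sources):
   simd clones come from declarations such as

     #pragma omp declare simd simdlen(4) uniform(scale) linear(i:1)
     double f (double x, double scale, int i);

   A call f (b[i], s, i) in a vectorized loop can then be replaced by a
   single call to the simdlen-4 clone, passing a V4DF vector for X, the
   scalar S for the uniform argument and the start value for the linear
   one; the badness heuristic above picks the cheapest usable clone.  */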

/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
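
/* Illustrative note (added commentary, not from the original sources):
   for a widening multiply of two V8HI operands, this helper is called
   twice, once with VEC_WIDEN_MULT_LO_EXPR and once with
   VEC_WIDEN_MULT_HI_EXPR, each producing a V4SI vector so that the two
   halves together cover all eight lanes.  */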

/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
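
/* Illustrative note (added commentary, not from the original sources):
   each invocation pushes two vector defs, so a two-step conversion
   (multi_step_cvt == 1) collects four vectors for one scalar operand;
   the recursion depth matches the number of intermediate types needed
   between the source and destination vector types.  */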

/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
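
/* Illustrative note (added commentary, not from the original sources):
   demoting V4SI values to V16QI packs in two recursive steps:

     4 x V4SI --VEC_PACK_TRUNC--> 2 x V8HI --VEC_PACK_TRUNC--> 1 x V16QI

   each level halving the number of vectors, which is why the operand
   vector is truncated to (i+1)/2 entries before recursing.  */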
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
                                        vec<tree> *vec_oprnds1,
                                        gimple *stmt, tree vec_dest,
                                        gimple_stmt_iterator *gsi,
                                        enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
        vop1 = (*vec_oprnds1)[i];
      else
        vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
        {
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
        }
      else
        {
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);
        }

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
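
/* As an illustration (hypothetical 128-bit target): widening V8HI to
   V4SI with CODE1/CODE2 == VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR turns
   each V8HI def in VEC_OPRNDS0 into a lo and a hi V4SI half, so the
   operand vector doubles in length on every step - the mirror image of
   the halving done in vect_create_vectorized_demotion_stmts.  */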
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?  */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
            && INTEGRAL_TYPE_P (rhs_type))
           || (SCALAR_FLOAT_TYPE_P (lhs_type)
               && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
           && !type_has_mode_precision_p (lhs_type))
          || (INTEGRAL_TYPE_P (rhs_type)
              && !type_has_mode_precision_p (rhs_type))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision unsupported."
                         "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
         OP1.  */
      if (CONSTANT_CLASS_P (op0))
        ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
        ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't convert between boolean and non "
                           "boolean vectors ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
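
  /* Worked example (hypothetical numbers): converting int to short with
     VF == 16 and 128-bit vectors has VECTYPE_IN == V4SI (nunits_in == 4)
     and VECTYPE_OUT == V8HI (nunits_out == 8), hence modifier == NARROW
     and ncopies == 16 / 8 == 2; each copy packs two input vectors into
     one output vector.  */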
  bool found_mode = false;
  scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
  scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
  opt_scalar_mode rhs_mode_iter;

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
        return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
                                         &decl1, &code1))
        break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
                                          &code1, &code2, &multi_step_cvt,
                                          &interm_types))
        {
          /* Binary widening operation can only be supported directly by the
             architecture.  */
          gcc_assert (!(multi_step_cvt && op_type == binary_op));
          break;
        }

      if (code != FLOAT_EXPR
          || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
        goto unsupported;

      fltsz = GET_MODE_SIZE (lhs_mode);
      FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
        {
          rhs_mode = rhs_mode_iter.require ();
          if (GET_MODE_SIZE (rhs_mode) > fltsz)
            break;

          cvt_type
            = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
          cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
          if (cvt_type == NULL_TREE)
            goto unsupported;

          if (GET_MODE_SIZE (rhs_mode) == fltsz)
            {
              if (!supportable_convert_operation (code, vectype_out,
                                                  cvt_type, &decl1, &codecvt1))
                goto unsupported;
            }
          else if (!supportable_widening_operation (code, stmt, vectype_out,
                                                    cvt_type, &codecvt1,
                                                    &codecvt2, &multi_step_cvt,
                                                    &interm_types))
            continue;
          else
            gcc_assert (multi_step_cvt == 0);

          if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
                                              vectype_in, &code1, &code2,
                                              &multi_step_cvt, &interm_types))
            {
              found_mode = true;
              break;
            }
        }

      if (!found_mode)
        goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
        codecvt2 = ERROR_MARK;
      else
        {
          multi_step_cvt++;
          interm_types.safe_push (cvt_type);
          cvt_type = NULL_TREE;
        }
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
        break;

      if (code != FIX_TRUNC_EXPR
          || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
        goto unsupported;

      cvt_type
        = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
        goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
                                          &decl1, &codecvt1))
        goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
        break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt)                /* transformation not required.  */
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
        {
          STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
          vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
        }
      else if (modifier == NARROW)
        {
          STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
        }
      else
        {
          STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
        }
      interm_types.release ();
      return true;
    }
  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
        op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
        op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  auto_vec<tree> vec_dsts (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
                                          (cvt_type && modifier == WIDEN)
                                          ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);
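
  /* Resulting order (illustrative): for a two-step widening
     char -> short -> int, the int destination is pushed first and the
     short destination second, so the transform loops below, walking
     i = multi_step_cvt .. 0, use the intermediate (short) destination
     first and the final (int) one last.  */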
  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
           interm_types.iterate (i, &intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          vec_dsts.quick_push (vec_dest);
        }
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
                                            modifier == WIDEN
                                            ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
        {
          vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
          if (op_type == binary_op)
            vec_oprnds1.create (1);
        }
      else if (modifier == NARROW)
        vec_oprnds0.create (
                   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
            {
              /* Arguments are ready, create the new vector stmt.  */
              if (code1 == CALL_EXPR)
                {
                  new_stmt = gimple_build_call (decl1, 1, vop0);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                }
              else
                {
                  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
                  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                }

              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              else
                {
                  if (!prev_stmt_info)
                    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
                  else
                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
                  prev_stmt_info = vinfo_for_stmt (new_stmt);
                }
            }
        }
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              if (slp_node)
                {
                  if (code == WIDEN_LSHIFT_EXPR)
                    {
                      unsigned int k;

                      vec_oprnd1 = op1;
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);

                      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
                                         NULL, slp_node);
                    }
                  else
                    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
                                       &vec_oprnds1, slp_node);
                }
              else
                {
                  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
                  vec_oprnds0.quick_push (vec_oprnd0);
                  if (op_type == binary_op)
                    {
                      if (code == WIDEN_LSHIFT_EXPR)
                        vec_oprnd1 = op1;
                      else
                        vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
                      vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
              vec_oprnds0.truncate (0);
              vec_oprnds0.quick_push (vec_oprnd0);
              if (op_type == binary_op)
                {
                  if (code == WIDEN_LSHIFT_EXPR)
                    vec_oprnd1 = op1;
                  else
                    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
                                                                 vec_oprnd1);
                  vec_oprnds1.truncate (0);
                  vec_oprnds1.quick_push (vec_oprnd1);
                }
            }

          /* Arguments are ready.  Create the new vector stmts.  */
          for (i = multi_step_cvt; i >= 0; i--)
            {
              tree this_dest = vec_dsts[i];
              enum tree_code c1 = code1, c2 = code2;
              if (i == 0 && codecvt2 != ERROR_MARK)
                {
                  c1 = codecvt1;
                  c2 = codecvt2;
                }
              vect_create_vectorized_promotion_stmts (&vec_oprnds0,
                                                      &vec_oprnds1,
                                                      stmt, this_dest, gsi,
                                                      c1, c2, decl1, decl2,
                                                      op_type);
            }

          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
            {
              if (cvt_type)
                {
                  if (codecvt1 == CALL_EXPR)
                    {
                      new_stmt = gimple_build_call (decl1, 1, vop0);
                      new_temp = make_ssa_name (vec_dest, new_stmt);
                      gimple_call_set_lhs (new_stmt, new_temp);
                    }
                  else
                    {
                      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
                      new_temp = make_ssa_name (vec_dest);
                      new_stmt = gimple_build_assign (new_temp, codecvt1,
                                                      vop0);
                    }

                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                }
              else
                new_stmt = SSA_NAME_DEF_STMT (vop0);

              if (slp_node)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              else
                {
                  if (!prev_stmt_info)
                    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
                  else
                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
                  prev_stmt_info = vinfo_for_stmt (new_stmt);
                }
            }
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;

    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (slp_node)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
          else
            {
              vec_oprnds0.truncate (0);
              vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
                                        vect_pow2 (multi_step_cvt) - 1);
            }

          /* Arguments are ready.  Create the new vector stmts.  */
          if (cvt_type)
            FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
              {
                if (codecvt1 == CALL_EXPR)
                  {
                    new_stmt = gimple_build_call (decl1, 1, vop0);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_call_set_lhs (new_stmt, new_temp);
                  }
                else
                  {
                    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
                    new_temp = make_ssa_name (vec_dest);
                    new_stmt = gimple_build_assign (new_temp, codecvt1,
                                                    vop0);
                  }

                vect_finish_stmt_generation (stmt, new_stmt, gsi);
                vec_oprnds0[i] = new_temp;
              }

          vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
                                                 stmt, vec_dsts, gsi,
                                                 slp_node, code1,
                                                 &prev_stmt_info);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[1] = {vect_unknown_def_type};
  int ndts = 1;
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
          || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
          || (GET_MODE_SIZE (TYPE_MODE (vectype))
              != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
          || !type_has_mode_precision_p (TREE_TYPE (op)))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
            > TYPE_PRECISION (TREE_TYPE (op)))
           && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
         a simple assignment in case their vectypes are same
         boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
          || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision "
                         "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
        {
          if (CONVERT_EXPR_CODE_P (code)
              || code == VIEW_CONVERT_EXPR)
            vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
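
/* Worked example (illustrative): a same-width conversion such as
       u = (unsigned int) s;
   is handled here as a plain copy; since the NOP_EXPR only changes the
   sign, the operand is wrapped in a VIEW_CONVERT_EXPR to the destination
   vector type and copied with one vector statement per copy.  */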
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
          || (optab_handler (optab, TYPE_MODE (vectype))
              == CODE_FOR_nothing))
        return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
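
/* As an illustration (hypothetical loops):

       a[i] = b[i] << 3;        all lanes shift by the same amount, so
                                the optab_scalar form is sufficient;
       a[i] = b[i] << c[i];     per-lane shift amounts require the
                                optab_vector form.

   Either form being implemented for the vector mode makes CODE
   supportable, which is what the fallback from optab_scalar to
   optab_vector above checks.  */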
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
      if (slp_node)
        {
          vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
          gimple *slpstmt;

          FOR_EACH_VEC_ELT (stmts, k, slpstmt)
            if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
              scalar_shift_arg = false;
        }

      /* If the shift amount is computed by a pattern stmt we cannot
         use the scalar amount directly thus give up and use a vector
         shift.  */
      if (dt[1] == vect_internal_def)
        {
          gimple *def = SSA_NAME_DEF_STMT (op1);
          if (is_pattern_stmt_p (vinfo_for_stmt (def)))
            scalar_shift_arg = false;
        }
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
        op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
          || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
          return false;
        }
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
            {
              scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of long long/long/short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
              else if (!useless_type_conversion_p (TREE_TYPE (vectype),
                                                   TREE_TYPE (op1)))
                {
                  if (slp_node
                      && TYPE_MODE (TREE_TYPE (vectype))
                         != TYPE_MODE (TREE_TYPE (op1)))
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
                      return false;
                    }
                  if (vec_stmt && !slp_node)
                    {
                      op1 = fold_convert (TREE_TYPE (vectype), op1);
                      op1 = vect_init_vector (stmt, op1,
                                              TREE_TYPE (vectype), NULL);
                    }
                }
            }
        }
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "operand 1 using scalar mode.\n");
                  vec_oprnd1 = op1;
                  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
                  vec_oprnds1.quick_push (vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = vec_oprnds1[i];
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 3;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
         invariant value (don't know whether it is a vector
         of booleans or vector of integers).  We use output
         vectype because operations on boolean don't change
         type.  */
      if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
        {
          if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not supported operation on bool value.\n");
              return false;
            }
          vectype = vectype_out;
        }
      else
        vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }
      target_support_p = (optab_handler (optab, vec_mode)
                          != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
          if (op_type == ternary_op)
            vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
                               slp_node);
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = vec_oprnds2.pop ();
              vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
                                                                 vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? vec_oprnds2[i] : NULL_TREE);
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
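
/* Worked example (illustrative): for S2 in the comment above,
   z = x + 1, with VF == 4 and V4SI vectors the invariant operand is
   vectorized once as v1 = {1,1,1,1} and a single copy vz0 = vx0 + v1
   is emitted; with VF == 16 the loop above emits the chained copies
   VS2_0 .. VS2_3.  */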
/* A helper function to ensure data reference DR's base alignment.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
        symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
        {
          SET_DECL_ALIGN (base_decl, TYPE_ALIGN (vectype));
          DECL_USER_ALIGN (base_decl) = 1;
        }
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
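
/* For example (illustrative): if the base object is a file-scope
       static float a[256];
   with the default 4-byte alignment and VECTYPE is V4SF, this raises
   the decl (or symtab) alignment to TYPE_ALIGN (V4SF) == 128 bits so
   that aligned vector accesses to A can be generated.  */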
/* Function get_group_alias_ptr_type.

   Return the alias type for the group starting at FIRST_STMT.  */

static tree
get_group_alias_ptr_type (gimple *first_stmt)
{
  struct data_reference *first_dr, *next_dr;
  gimple *next_stmt;

  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
  while (next_stmt)
    {
      next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
      if (get_alias_set (DR_REF (first_dr))
          != get_alias_set (DR_REF (next_dr)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "conflicting alias set types.\n");
          return ptr_type_node;
        }
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  return reference_alias_ptr_type (DR_REF (first_dr));
}
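
/* Illustrative case: if the group members store through references with
   different alias sets (say, differently typed members of a union), the
   ptr_type_node fallback yields an alias type that conflicts with
   anything, which is conservatively correct for the whole group.  */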
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt;
  bool grouped_store;
  unsigned int group_size, i;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  gather_scatter_info gs_info;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;
  gimple *new_stmt;
  int vf;
  vec_load_store_type vls_type;
  tree ref_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is vectorizable store?  */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  /* Cannot have hybrid store SLP -- that would mean storing to the
     same location twice.  */
  gcc_assert (slp == PURE_SLP_STMT (stmt_info));

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);

  /* In the case this is a store from a STRING_CST make sure
     native_encode_expr can handle it.  */
  if (TREE_CODE (op) == STRING_CST
      && ! can_native_encode_string_p (op))
    return false;

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    vls_type = VLS_STORE_INVARIANT;
  else
    vls_type = VLS_STORE;

  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    return false;

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
                            &memory_access_type, &gs_info))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_store_cost (stmt_info, ncopies, memory_access_type, dt,
                               NULL, NULL, NULL);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  /* Transform.  */

  ensure_base_align (stmt_info, dr);

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
        modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
        }
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < (unsigned int) nunits; ++i)
            sel[i] = i | scatter_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          gcc_assert (perm_mask != NULL_TREE);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();
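
      /* Worked example (hypothetical target): V4DF data (nunits == 4)
         scattered with V8SI offsets (scatter_off_nunits == 8) gives
         modifier == WIDEN and sel == {4, 5, 6, 7, 4, 5, 6, 7}, so on odd
         copies the permutation below brings the upper half of the offset
         vector into use.  In the NARROW case the data vector is permuted
         instead and ncopies is doubled, since each data vector must be
         scattered in two halves.  */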
      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
                           && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional scatter stores,
         so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, gs_info.scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (j == 0)
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
              op = vec_oprnd0
                = vect_get_vec_def_for_operand (gs_info.offset, stmt);
            }
          else if (modifier != NONE && (j & 1))
            {
              if (modifier == WIDEN)
                {
                  src = vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
                  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
                                             stmt, gsi);
                }
              else if (modifier == NARROW)
                {
                  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
                                              stmt, gsi);
                  op = vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
                                                      vec_oprnd0);
                }
              else
                gcc_unreachable ();
            }
          else
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
              op = vec_oprnd0
                = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
                                                  vec_oprnd0);
            }

          if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
                          == TYPE_VECTOR_SUBPARTS (srctype));
              var = vect_get_new_ssa_name (srctype, vect_simple_var);
              src = build1 (VIEW_CONVERT_EXPR, srctype, src);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              src = var;
            }

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
  if (grouped_store)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt))
                      == first_stmt);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr));
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);
5982 if (memory_access_type
== VMAT_ELEMENTWISE
5983 || memory_access_type
== VMAT_STRIDED_SLP
)
5985 gimple_stmt_iterator incr_gsi
;
5991 gimple_seq stmts
= NULL
;
5992 tree stride_base
, stride_step
, alias_off
;
5996 gcc_assert (!nested_in_vect_loop_p (loop
, stmt
));
5999 = fold_build_pointer_plus
6000 (unshare_expr (DR_BASE_ADDRESS (first_dr
)),
6001 size_binop (PLUS_EXPR
,
6002 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr
))),
6003 convert_to_ptrofftype (DR_INIT (first_dr
))));
6004 stride_step
= fold_convert (sizetype
, unshare_expr (DR_STEP (first_dr
)));
6006 /* For a store with loop-invariant (but other than power-of-2)
6007 stride (i.e. not a grouped access) like so:
6009 for (i = 0; i < n; i += stride)
6012 we generate a new induction variable and new stores from
6013 the components of the (vectorized) rhs:
6015 for (j = 0; ; j += VF*stride)
6020 array[j + stride] = tmp2;
6024 unsigned nstores
= nunits
;
6026 tree ltype
= elem_type
;
6027 tree lvectype
= vectype
;
6030 if (group_size
< nunits
6031 && nunits
% group_size
== 0)
6033 nstores
= nunits
/ group_size
;
6035 ltype
= build_vector_type (elem_type
, group_size
);
6038 /* First check if vec_extract optab doesn't support extraction
6039 of vector elts directly. */
6040 scalar_mode elmode
= SCALAR_TYPE_MODE (elem_type
);
6042 if (!mode_for_vector (elmode
, group_size
).exists (&vmode
)
6043 || !VECTOR_MODE_P (vmode
)
6044 || (convert_optab_handler (vec_extract_optab
,
6045 TYPE_MODE (vectype
), vmode
)
6046 == CODE_FOR_nothing
))
6048 /* Try to avoid emitting an extract of vector elements
6049 by performing the extracts using an integer type of the
6050 same size, extracting from a vector of those and then
6051 re-interpreting it as the original vector type if
6054 = group_size
* GET_MODE_BITSIZE (elmode
);
6055 elmode
= int_mode_for_size (lsize
, 0).require ();
6056 /* If we can't construct such a vector fall back to
6057 element extracts from the original vector type and
6058 element size stores. */
6059 if (mode_for_vector (elmode
,
6060 nunits
/ group_size
).exists (&vmode
)
6061 && VECTOR_MODE_P (vmode
)
6062 && (convert_optab_handler (vec_extract_optab
,
6064 != CODE_FOR_nothing
))
6066 nstores
= nunits
/ group_size
;
6068 ltype
= build_nonstandard_integer_type (lsize
, 1);
6069 lvectype
= build_vector_type (ltype
, nstores
);
6071 /* Else fall back to vector extraction anyway.
6072 Fewer stores are more important than avoiding spilling
6073 of the vector we extract from. Compared to the
6074 construction case in vectorizable_load no store-forwarding
6075 issue exists here for reasonable archs. */
6078 else if (group_size
>= nunits
6079 && group_size
% nunits
== 0)
6086 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (elem_type
));
6087 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6090 ivstep
= stride_step
;
6091 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
6092 build_int_cst (TREE_TYPE (ivstep
), vf
));
6094 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6096 create_iv (stride_base
, ivstep
, NULL
,
6097 loop
, &incr_gsi
, insert_after
,
6099 incr
= gsi_stmt (incr_gsi
);
6100 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
6102 stride_step
= force_gimple_operand (stride_step
, &stmts
, true, NULL_TREE
);
6104 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
6106 prev_stmt_info
= NULL
;
6107 alias_off
= build_int_cst (ref_type
, 0);
6108 next_stmt
= first_stmt
;
6109 for (g
= 0; g
< group_size
; g
++)
6111 running_off
= offvar
;
6114 tree size
= TYPE_SIZE_UNIT (ltype
);
6115 tree pos
= fold_build2 (MULT_EXPR
, sizetype
, size_int (g
),
6117 tree newoff
= copy_ssa_name (running_off
, NULL
);
6118 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6120 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6121 running_off
= newoff
;
6123 unsigned int group_el
= 0;
6124 unsigned HOST_WIDE_INT
6125 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
6126 for (j
= 0; j
< ncopies
; j
++)
              /* We've set op and dt above, from gimple_assign_rhs1(stmt),
                 and first_stmt == stmt.  */
              if (j == 0)
                {
                  if (slp)
                    {
                      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
                                         slp_node);
                      vec_oprnd = vec_oprnds[0];
                    }
                  else
                    {
                      gcc_assert (gimple_assign_single_p (next_stmt));
                      op = gimple_assign_rhs1 (next_stmt);
                      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                    }
                }
              else
                {
                  if (slp)
                    vec_oprnd = vec_oprnds[j];
                  else
                    {
                      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
                      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
                    }
                }
              /* Pun the vector to extract from if necessary.  */
              if (lvectype != vectype)
                {
                  tree tem = make_ssa_name (lvectype);
                  gimple *pun
                    = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
                                                        lvectype, vec_oprnd));
                  vect_finish_stmt_generation (stmt, pun, gsi);
                  vec_oprnd = tem;
                }
              for (i = 0; i < nstores; i++)
                {
                  tree newref, newoff;
                  gimple *incr, *assign;
                  tree size = TYPE_SIZE (ltype);
                  /* Extract the i'th component.  */
                  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
                                          bitsize_int (i), size);
                  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
                                           size, pos);

                  elem = force_gimple_operand_gsi (gsi, elem, true,
                                                   NULL_TREE, true,
                                                   GSI_SAME_STMT);

                  tree this_off = build_int_cst (TREE_TYPE (alias_off),
                                                 group_el * elsz);
                  newref = build2 (MEM_REF, ltype,
                                   running_off, this_off);

                  /* And store it to *running_off.  */
                  assign = gimple_build_assign (newref, elem);
                  vect_finish_stmt_generation (stmt, assign, gsi);

                  group_el += lnel;
                  if (! slp
                      || group_el == group_size)
                    {
                      newoff = copy_ssa_name (running_off, NULL);
                      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
                      vect_finish_stmt_generation (stmt, incr, gsi);

                      running_off = newoff;
                      group_el = 0;
                    }
                  if (g == group_size - 1
                      && !slp)
                    {
                      if (j == 0 && i == 0)
                        STMT_VINFO_VEC_STMT (stmt_info)
                          = *vec_stmt = assign;
                      else
                        STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
                      prev_stmt_info = vinfo_for_stmt (assign);
                    }
                }
            }
          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
          if (slp)
            break;
        }

      vec_oprnds.release ();
      return true;
    }
  auto_vec<tree> dr_chain (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (memory_access_type == VMAT_CONTIGUOUS_DOWN
      || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */
  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
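  /* For example (illustrative, V8SI vectors): the first mask
     {0, 8, 1, 9, 2, 10, 3, 11} interleaves the low halves of the two
     input vectors element by element, yielding the in-memory order
     a0 b0 a1 b1 a2 b2 a3 b3; the second mask does the same for the
     high halves.  */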
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                  dr_chain.quick_push (vec_oprnd);
                  oprnds.quick_push (vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }
          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (TREE_TYPE (ref_type))))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (ref_type, 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type,
                                          simd_lane_access_p ? loop : NULL,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = oprnds[i];
              vect_is_simple_use (op, vinfo, &def_stmt, &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              dr_chain[i] = vec_oprnd;
              oprnds[i] = vec_oprnd;
            }
          if (dataref_offset)
            dataref_offset
              = int_const_binop (PLUS_EXPR, dataref_offset,
                                 TYPE_SIZE_UNIT (aggr_type));
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           TYPE_SIZE_UNIT (aggr_type));
        }
      if (memory_access_type == VMAT_LOAD_STORE_LANES)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = dr_chain[i];
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
          gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
                                                    vec_array);
          gimple_call_set_lhs (call, data_ref);
          gimple_call_set_nothrow (call, true);
          new_stmt = call;
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
        }
      else
        {
          new_stmt = NULL;
          if (grouped_store)
            {
              if (j == 0)
                result_chain.create (group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              unsigned align, misalign;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = result_chain[i];

              data_ref = fold_build2 (MEM_REF, vectype,
                                      dataref_ptr,
                                      dataref_offset
                                      ? dataref_offset
                                      : build_int_cst (ref_type, 0));
              align = TYPE_ALIGN_UNIT (vectype);
              if (aligned_access_p (first_dr))
                misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  align = dr_alignment (vect_dr_behavior (first_dr));
                  misalign = 0;
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          align * BITS_PER_UNIT);
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  misalign = DR_MISALIGNMENT (first_dr);
                }
              if (dataref_offset == NULL_TREE
                  && TREE_CODE (dataref_ptr) == SSA_NAME)
                set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                        misalign);

              if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  tree perm_dest
                    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
                                                   vectype);
                  tree new_temp = make_ssa_name (perm_dest);

                  /* Generate the permute statement.  */
                  gimple *perm_stmt
                    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
                                           vec_oprnd, perm_mask);
                  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

                  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
                  vec_oprnd = new_temp;
                }
              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }
      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
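/* As an illustrative use (example values): reversing a four-element
   vector can be expressed as

     unsigned char sel[4] = { 3, 2, 1, 0 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);

   with MASK then used as the third operand of a VEC_PERM_EXPR.  */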
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */
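/* For example (illustrative): if STMT is "x_1 = a_2 * 4" and a_2 is
   defined inside LOOP by an invariant statement "a_2 = b_3 + 1", that
   definition is moved to the preheader so that STMT itself can be
   hoisted afterwards.  */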
static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
          tree op2;
          ssa_op_iter i2;
          if (gimple_code (def_stmt) == GIMPLE_PHI)
            return false;
          FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
            {
              gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
              if (!gimple_nop_p (def_stmt2)
                  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
                return false;
            }
          any = true;
        }
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
          gsi_remove (&gsi, false);
          gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
        }
    }

  return true;
}
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  tree new_temp;
  machine_mode mode;
  gimple *new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int i, j, group_size, group_gap_adj;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree byte_offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gphi *phi = NULL;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  gimple *first_stmt;
  gimple *first_stmt_for_drptr = NULL;
  bool inv_p;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;
  tree aggr_type;
  gather_scatter_info gs_info;
  vec_info *vinfo = stmt_info->vinfo;
  tree ref_type;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is vectorizable load? */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != BIT_FIELD_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF
      && TREE_CODE_CLASS (code) != tcc_declaration)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
          > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot perform implicit CSE when unrolling "
                         "with negative dependence distance\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
    (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Aligned load, but unsupported type.\n");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop);
      gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        slp_perm = true;

      /* Invalidate assumptions made by dependence analysis when vectorization
         on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
          && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
          && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
              > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "cannot perform implicit CSE when performing "
                             "group loads with negative dependence distance\n");
          return false;
        }

      /* Similarly when the stmt is a load that is both part of a SLP
         instance and a loop vectorized stmt via the same-dr mechanism
         we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
          && (STMT_SLP_TYPE (stmt_info)
              != STMT_SLP_TYPE (vinfo_for_stmt
                                 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "conflicting SLP types for CSEd load\n");
          return false;
        }
    }

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
                            &memory_access_type, &gs_info))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      if (!slp)
        STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, memory_access_type,
                              NULL, NULL, NULL);
      return true;
    }

  if (!slp)
    gcc_assert (memory_access_type
                == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE, prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();
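      /* As an example (hypothetical element counts): a gather of four
         doubles (nunits == 4) driven by eight 32-bit offsets
         (gather_off_nunits == 8) uses modifier == WIDEN and consumes the
         offset vector in two halves, while four 64-bit offsets feeding an
         eight-element result would use NARROW and double ncopies.  */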
      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();

      scale = build_int_cst (scaletype, gs_info.scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);
      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gs_info.offset, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          ref_type = get_group_alias_ptr_type (first_stmt);
        }
      else
        {
          first_stmt = stmt;
          first_dr = dr;
          group_size = 1;
          ref_type = reference_alias_ptr_type (DR_REF (first_dr));
        }

      stride_base
        = fold_build_pointer_plus
            (DR_BASE_ADDRESS (first_dr),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (DR_OFFSET (first_dr)),
                         convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));
      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
             ...  */
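      /* Concretely (example values): with VF == 4 the new loop loads the
         four scalars array[j], array[j + stride], array[j + 2*stride] and
         array[j + 3*stride] and combines them with a CONSTRUCTOR into one
         vector per copy, with j advancing by 4*stride per iteration.  */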
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
                            build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
                                          &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (ref_type, 0);
      int nloads = nunits;
      int lnel = 1;
      tree ltype = TREE_TYPE (vectype);
      tree lvectype = vectype;
      auto_vec<tree> dr_chain;
      if (memory_access_type == VMAT_STRIDED_SLP)
        {
          if (group_size < nunits)
            {
              /* First check if vec_init optab supports construction from
                 vector elts directly.  */
              scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
              machine_mode vmode;
              if (mode_for_vector (elmode, group_size).exists (&vmode)
                  && VECTOR_MODE_P (vmode)
                  && (convert_optab_handler (vec_init_optab,
                                             TYPE_MODE (vectype), vmode)
                      != CODE_FOR_nothing))
                {
                  nloads = nunits / group_size;
                  lnel = group_size;
                  ltype = build_vector_type (TREE_TYPE (vectype), group_size);
                }
              else
                {
                  /* Otherwise avoid emitting a constructor of vector elements
                     by performing the loads using an integer type of the same
                     size, constructing a vector of those and then
                     re-interpreting it as the original vector type.
                     This avoids a huge runtime penalty due to the general
                     inability to perform store forwarding from smaller stores
                     to a larger load.  */
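                  /* For example (illustrative numbers): two adjacent SImode
                     stores followed by a DImode load of the same bytes
                     typically cannot be store-forwarded; loading one DImode
                     chunk per group and punning the resulting integer vector
                     back to the original vector type sidesteps that stall.  */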
                  unsigned lsize
                    = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
                  elmode = int_mode_for_size (lsize, 0).require ();
                  /* If we can't construct such a vector fall back to
                     element loads of the original vector type.  */
                  if (mode_for_vector (elmode,
                                       nunits / group_size).exists (&vmode)
                      && VECTOR_MODE_P (vmode)
                      && (convert_optab_handler (vec_init_optab, vmode, elmode)
                          != CODE_FOR_nothing))
                    {
                      nloads = nunits / group_size;
                      lnel = group_size;
                      ltype = build_nonstandard_integer_type (lsize, 1);
                      lvectype = build_vector_type (ltype, nloads);
                    }
                }
            }
          else
            {
              nloads = 1;
              lnel = nunits;
              ltype = vectype;
            }
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
        }
      if (slp)
        {
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            {
              ncopies = (group_size * vf + nunits - 1) / nunits;
              dr_chain.create (ncopies);
            }
          else
            ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
        }
      int group_el = 0;
      unsigned HOST_WIDE_INT
        elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      for (j = 0; j < ncopies; j++)
        {
          if (nloads > 1)
            vec_alloc (v, nloads);
          for (i = 0; i < nloads; i++)
            {
              tree this_off = build_int_cst (TREE_TYPE (alias_off),
                                             group_el * elsz);
              new_stmt = gimple_build_assign (make_ssa_name (ltype),
                                              build2 (MEM_REF, ltype,
                                                      running_off, this_off));
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (nloads > 1)
                CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
                                        gimple_assign_lhs (new_stmt));

              group_el += lnel;
              if (! slp
                  || group_el == group_size)
                {
                  tree newoff = copy_ssa_name (running_off);
                  gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                      running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);
                  running_off = newoff;
                  group_el = 0;
                }
            }
          if (nloads > 1)
            {
              tree vec_inv = build_constructor (lvectype, v);
              new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
              if (lvectype != vectype)
                {
                  new_stmt = gimple_build_assign (make_ssa_name (vectype),
                                                  VIEW_CONVERT_EXPR,
                                                  build1 (VIEW_CONVERT_EXPR,
                                                          vectype, new_temp));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                }
            }

          if (slp)
            {
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
              else
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (slp_perm)
        {
          unsigned n_perms;
          vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                        slp_node_instance, false, &n_perms);
        }
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      /* For SLP vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
         the data ref pointer on.  */
      if (bb_vinfo)
        first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            {
              vec_num = (group_size * vf + nunits - 1) / nunits;
              group_gap_adj = vf * group_size - nunits * vec_num;
            }
          else
            {
              vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
              group_gap_adj
                = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
            }
        }
      else
        vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr));
    }
  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && (DR_STEP_ALIGNMENT (dr) % GET_MODE_SIZE (TYPE_MODE (vectype))) != 0)
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }
  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;

  if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  int group_elt = 0;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (TREE_TYPE (ref_type)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (ref_type, 0);
              inv_p = false;
            }
          else if (first_stmt_for_drptr
                   && first_stmt != first_stmt_for_drptr)
            {
              dataref_ptr
                = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
                                            at_loop, offset, &dummy, gsi,
                                            &ptr_incr, simd_lane_access_p,
                                            &inv_p, byte_offset);
              /* Adjust the pointer by the difference to first_stmt.  */
              data_reference_p ptrdr
                = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
              tree diff = fold_convert (sizetype,
                                        size_binop (MINUS_EXPR,
                                                    DR_INIT (first_dr),
                                                    DR_INIT (ptrdr)));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, diff);
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p,
                                          byte_offset);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));
      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);

      if (memory_access_type == VMAT_LOAD_STORE_LANES)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
          gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
                                                    data_ref);
          gimple_call_set_lhs (call, vec_array);
          gimple_call_set_nothrow (call, true);
          new_stmt = call;
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = fold_build2 (MEM_REF, vectype, dataref_ptr,
                                     dataref_offset
                                     ? dataref_offset
                                     : build_int_cst (ref_type, 0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        align = dr_alignment (vect_dr_behavior (first_dr));
                        misalign = 0;
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                align * BITS_PER_UNIT);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE
                        && TREE_CODE (dataref_ptr) == SSA_NAME)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;

                    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      ptr = copy_ssa_name (dataref_ptr);
                    else
                      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
                    new_stmt = gimple_build_assign
                                 (ptr, BIT_AND_EXPR, dataref_ptr,
                                  build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (ref_type, 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs,
                                       TYPE_SIZE_UNIT (elem_type));
                    bump = size_binop (MINUS_EXPR, bump, size_one_node);
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign
                                 (NULL_TREE, BIT_AND_EXPR, ptr,
                                  build_int_cst
                                  (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT)TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (ref_type, 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  if (TREE_CODE (dataref_ptr) == SSA_NAME)
                    new_temp = copy_ssa_name (dataref_ptr);
                  else
                    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
                  new_stmt = gimple_build_assign
                               (new_temp, BIT_AND_EXPR, dataref_ptr,
                                build_int_cst (TREE_TYPE (dataref_ptr),
                                               -(HOST_WIDE_INT)
                                                TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (ref_type, 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
                                                  msq, lsq, realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }
              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }
                      tree tem = copy_ssa_name (scalar_dest);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                      set_vinfo_for_stmt (new_stmt,
                                          new_stmt_vec_info (new_stmt, vinfo));
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                    }
                }

              if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }
              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);

              /* With SLP permutation we load the gaps as well, without
                 we need to skip the gaps after we manage to fully load
                 all elements.  group_gap_adj is GROUP_SIZE here.  */
              group_elt += nunits;
              if (group_gap_adj != 0 && ! slp_perm
                  && group_elt == group_size - group_gap_adj)
                {
                  bool ovf;
                  tree bump
                    = wide_int_to_tree (sizetype,
                                        wi::smul (TYPE_SIZE_UNIT (elem_type),
                                                  group_gap_adj, &ovf));
                  dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                                 stmt, bump);
                  group_elt = 0;
                }
            }
          /* Bump the vector pointer to account for a gap or for excess
             elements loaded for a permuted SLP load.  */
          if (group_gap_adj != 0 && slp_perm)
            {
              bool ovf;
              tree bump
                = wide_int_to_tree (sizetype,
                                    wi::smul (TYPE_SIZE_UNIT (elem_type),
                                              group_gap_adj, &ovf));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          unsigned n_perms;
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false,
                                             &n_perms))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (memory_access_type != VMAT_LOAD_STORE_LANES)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
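/* For instance (illustrative): for a GIMPLE statement
   "x_5 = a_1 < b_2 ? c_3 : d_4" the condition "a_1 < b_2" is simple if
   both a_1 and b_2 are supportable operands, and *COMP_VECTYPE is then
   the vector type of that comparison.  */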
static bool
vect_is_simple_cond (tree cond, vec_info *vinfo,
                     tree *comp_vectype, enum vect_def_type *dts)
{
  tree lhs, rhs;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
                               &dts[0], comp_vectype)
          || !*comp_vectype
          || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
        return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
           || TREE_CODE (lhs) == FIXED_CST)
    dts[0] = vect_constant_def;
  else
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
           || TREE_CODE (rhs) == FIXED_CST)
    dts[1] = vect_constant_def;
  else
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
  tree then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[4]
    = {vect_unknown_def_type, vect_unknown_def_type,
       vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 4;
  int ncopies;
  enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
        return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
          && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
               && reduc_def))
        return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "value used after loop.\n");
          return false;
        }
    }
  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
                            &comp_vectype, &dts[0])
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
                           &vectype1))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
                           &vectype2))
    return false;

  if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
    return false;

  if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
    return false;

  masked = !COMPARISON_CLASS_P (cond_expr);
  vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  cond_code = TREE_CODE (cond_expr);
  if (!masked)
    {
      cond_expr0 = TREE_OPERAND (cond_expr, 0);
      cond_expr1 = TREE_OPERAND (cond_expr, 1);
    }
7979 /* Boolean values may have another representation in vectors
7980 and therefore we prefer bit operations over comparison for
7981 them (which also works for scalar masks). We store opcodes
7982 to use in bitop1 and bitop2. Statement is vectorized as
7983 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
7984 depending on bitop1 and bitop2 arity. */
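      /* For example (illustrative): for boolean operands the comparison
         a > b is equivalent to a & ~b, so GT_EXPR is emitted as
         bitop1 == BIT_NOT_EXPR applied to b followed by
         bitop2 == BIT_AND_EXPR with a, avoiding a vector comparison
         entirely.  */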
      switch (cond_code)
        {
        case GT_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
          break;
        case GE_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
          break;
        case LT_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
          std::swap (cond_expr0, cond_expr1);
          break;
        case LE_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
          std::swap (cond_expr0, cond_expr1);
          break;
        case NE_EXPR:
          bitop1 = BIT_XOR_EXPR;
          break;
        case EQ_EXPR:
          bitop1 = BIT_XOR_EXPR;
          bitop2 = BIT_NOT_EXPR;
          break;
        default:
          return false;
        }
      cond_code = SSA_NAME;
    }
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      if (bitop1 != NOP_EXPR)
        {
          machine_mode mode = TYPE_MODE (comp_vectype);
          optab optab;

          optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
          if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
            return false;

          if (bitop2 != NOP_EXPR)
            {
              optab = optab_for_tree_code (bitop2, comp_vectype,
                                           optab_default);
              if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
                return false;
            }
        }
      if (expand_vec_cond_expr_p (vectype, comp_vectype,
                                  cond_code))
        {
          vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
          return true;
        }
      return false;
    }
  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              if (masked)
                ops.safe_push (cond_expr);
              else
                {
                  ops.safe_push (cond_expr0);
                  ops.safe_push (cond_expr1);
                }
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              if (!masked)
                vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();
            }
          else
            {
              gimple *gtemp;
              if (masked)
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr, stmt,
                                                    comp_vectype);
                  vect_is_simple_use (cond_expr, stmt_info->vinfo,
                                      &gtemp, &dts[0]);
                }
              else
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr0,
                                                    stmt, comp_vectype);
                  vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);

                  vec_cond_rhs
                    = vect_get_vec_def_for_operand (cond_expr1,
                                                    stmt, comp_vectype);
                  vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
                }
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt);
                  vect_is_simple_use (then_clause, loop_vinfo,
                                      &gtemp, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt);
                  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs
            = vect_get_vec_def_for_stmt_copy (dts[0],
                                              vec_oprnds0.pop ());
          if (!masked)
            vec_cond_rhs
              = vect_get_vec_def_for_stmt_copy (dts[1],
                                                vec_oprnds1.pop ());

          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          if (!masked)
            vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }
8155 /* Arguments are ready. Create the new vector stmt. */
8156 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_cond_lhs
)
8158 vec_then_clause
= vec_oprnds2
[i
];
8159 vec_else_clause
= vec_oprnds3
[i
];
8162 vec_compare
= vec_cond_lhs
;
8165 vec_cond_rhs
= vec_oprnds1
[i
];
8166 if (bitop1
== NOP_EXPR
)
8167 vec_compare
= build2 (cond_code
, vec_cmp_type
,
8168 vec_cond_lhs
, vec_cond_rhs
);
8171 new_temp
= make_ssa_name (vec_cmp_type
);
8172 if (bitop1
== BIT_NOT_EXPR
)
8173 new_stmt
= gimple_build_assign (new_temp
, bitop1
,
8177 = gimple_build_assign (new_temp
, bitop1
, vec_cond_lhs
,
8179 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8180 if (bitop2
== NOP_EXPR
)
8181 vec_compare
= new_temp
;
8182 else if (bitop2
== BIT_NOT_EXPR
)
8184 /* Instead of doing ~x ? y : z do x ? z : y. */
8185 vec_compare
= new_temp
;
8186 std::swap (vec_then_clause
, vec_else_clause
);
8190 vec_compare
= make_ssa_name (vec_cmp_type
);
8192 = gimple_build_assign (vec_compare
, bitop2
,
8193 vec_cond_lhs
, new_temp
);
8194 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8198 new_temp
= make_ssa_name (vec_dest
);
8199 new_stmt
= gimple_build_assign (new_temp
, VEC_COND_EXPR
,
8200 vec_compare
, vec_then_clause
,
8202 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8204 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
8211 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
8213 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
8215 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
8218 vec_oprnds0
.release ();
8219 vec_oprnds1
.release ();
8220 vec_oprnds2
.release ();
8221 vec_oprnds3
.release ();
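/* Illustrative sketch (not part of the original sources): for scalar code

       c = a < b ? x : y;

   the transformation above emits, per vector copy, GIMPLE of the form

       vcmp = vca < vcb;                  <-- or the bitop form for masks
       vc = VEC_COND_EXPR <vcmp, vx, vy>;

   where VCMP has the comparison vector type VEC_CMP_TYPE and VC the
   destination vector type.  */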
/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, tree reduc_def,
                         slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  unsigned nunits;
  int ncopies;
  enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node)
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
                           &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
                           &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
      if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
        return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  /* Can't compare mask and non-mask types.  */
  if (vectype1 && vectype2
      && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
    return false;

  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    {
      if (code == GT_EXPR)
        {
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
        }
      else if (code == GE_EXPR)
        {
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
        }
      else if (code == LT_EXPR)
        {
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
          std::swap (rhs1, rhs2);
          std::swap (dts[0], dts[1]);
        }
      else if (code == LE_EXPR)
        {
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
          std::swap (rhs1, rhs2);
          std::swap (dts[0], dts[1]);
        }
      else
        {
          bitop1 = BIT_XOR_EXPR;
          if (code == EQ_EXPR)
            bitop2 = BIT_NOT_EXPR;
        }
    }
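/* For one-bit boolean elements the opcode selection above amounts to the
   following rewrites (an illustrative summary, not part of the original
   sources):

       a >  b   ->   a & ~b      bitop1 = BIT_NOT_EXPR, bitop2 = BIT_AND_EXPR
       a >= b   ->   a | ~b      bitop1 = BIT_NOT_EXPR, bitop2 = BIT_IOR_EXPR
       a <  b   ->   b & ~a      as GT_EXPR, with operands swapped
       a <= b   ->   b | ~a      as GE_EXPR, with operands swapped
       a == b   ->   ~(a ^ b)    bitop1 = BIT_XOR_EXPR, bitop2 = BIT_NOT_EXPR
       a != b   ->   a ^ b       bitop1 = BIT_XOR_EXPR only  */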
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
                              dts, ndts, NULL, NULL);
      if (bitop1 == NOP_EXPR)
        return expand_vec_cmp_expr_p (vectype, mask_type, code);
      else
        {
          machine_mode mode = TYPE_MODE (vectype);
          optab optab;

          optab = optab_for_tree_code (bitop1, vectype, optab_default);
          if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
            return false;

          if (bitop2 != NOP_EXPR)
            {
              optab = optab_for_tree_code (bitop2, vectype, optab_default);
              if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
                return false;
            }
          return true;
        }
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 2> ops;
              auto_vec<vec<tree>, 2> vec_defs;

              ops.safe_push (rhs1);
              ops.safe_push (rhs2);
              vect_get_slp_defs (ops, slp_node, &vec_defs);
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();
            }
          else
            {
              vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
              vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
            }
        }
      else
        {
          vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
                                                     vec_oprnds0.pop ());
          vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
                                                     vec_oprnds1.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_rhs1);
          vec_oprnds1.quick_push (vec_rhs2);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
        {
          vec_rhs2 = vec_oprnds1[i];

          new_temp = make_ssa_name (mask);
          if (bitop1 == NOP_EXPR)
            {
              new_stmt = gimple_build_assign (new_temp, code,
                                              vec_rhs1, vec_rhs2);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
            }
          else
            {
              if (bitop1 == BIT_NOT_EXPR)
                new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
              else
                new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
                                                vec_rhs2);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (bitop2 != NOP_EXPR)
                {
                  tree res = make_ssa_name (mask);
                  if (bitop2 == BIT_NOT_EXPR)
                    new_stmt = gimple_build_assign (res, bitop2, new_temp);
                  else
                    new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
                                                    new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                }
            }
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
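/* Illustrative example (not part of the original sources): for a mask
   equality a_mask == b_mask the two-opcode form above emits

       t = va ^ vb;      <-- bitop1 = BIT_XOR_EXPR
       m = ~t;           <-- bitop2 = BIT_NOT_EXPR

   instead of a vector comparison statement.  */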
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
                   slp_instance node_instance)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     A pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal; there we don't analyze pattern stmts instead, as the pattern
     stmts will already be part of an SLP instance.  */
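/* An example of a pattern statement (illustration only, simplified): for
   the widen-mult pattern, the relevant original statement

       t = (int) a_short * (int) b_short;

   has the related pattern statement

       pt = a_short w* b_short;   <-- WIDEN_MULT_EXPR

   and it is PT, not T, that has to be analyzed when T itself is not
   relevant.  */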
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
                              node_instance))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node, node_instance))
                return false;
            }
        }
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
                  && (relevance == vect_used_in_outer
                      || relevance == vect_used_in_outer_by_reduction
                      || relevance == vect_used_by_reduction
                      || relevance == vect_unused_in_scope
                      || relevance == vect_used_only_live));
      break;

    case vect_induction_def:
      gcc_assert (!bb_vinfo);
      break;

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
          || vectorizable_conversion (stmt, NULL, NULL, node)
          || vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_call (stmt, NULL, NULL, node)
          || vectorizable_store (stmt, NULL, NULL, node)
          || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
          || vectorizable_induction (stmt, NULL, NULL, node)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
          || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
              || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL, -1, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their stmt_vec_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
                                     slp_node_instance);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
                && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info)
             == vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (slp_node)
    {
      gimple *slp_stmt;
      int i;
      if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
        FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
          {
            stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
            if (STMT_VINFO_LIVE_P (slp_stmt_info))
              {
                done = vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
                                                    &vec_stmt);
                gcc_assert (done);
              }
          }
    }
  else if (STMT_VINFO_LIVE_P (stmt_info)
           && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, slp_node, -1, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
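/* Caller-side sketch (illustration only, simplified): the boolean result
   tells the transform driver whether STMT was a (grouped) store whose
   scalar statements can now be removed, roughly

       if (vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL)
           && grouped_store)
         vect_remove_stores (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));  */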
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}

/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (lhs && TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;
  int nunits;
  tree vectype;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
    return NULL_TREE;

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else if (!mode_for_vector (inner_mode, size / nbytes).exists (&simd_mode))
    return NULL_TREE;

  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  /* NOTE: nunits == 1 is allowed to support single element vector types.  */
  if (nunits < 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
             (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));

  return vectype;
}
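/* Example (illustration only, target-dependent): on a target whose
   preferred SIMD mode for SImode is V4SImode, a call with SIZE == 0
   yields vector(4) int, while an explicit SIZE of 32 asks for a 32-byte
   vector mode and yields vector(8) int if such a mode exists, or
   NULL_TREE otherwise.  */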
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by the target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
                                  current_vector_size);
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE with the same size
   as VECTOR_TYPE, if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
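/* Example (illustration only): with VECTOR_TYPE == vector(4) int,
   requesting float yields vector(4) float, while a scalar boolean yields
   the truth vector type of the same size and element count.  */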
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
        {
        case vect_uninitialized_def:
          dump_printf (MSG_NOTE, "uninitialized\n");
          break;
        case vect_constant_def:
          dump_printf (MSG_NOTE, "constant\n");
          break;
        case vect_external_def:
          dump_printf (MSG_NOTE, "external\n");
          break;
        case vect_internal_def:
          dump_printf (MSG_NOTE, "internal\n");
          break;
        case vect_induction_def:
          dump_printf (MSG_NOTE, "induction\n");
          break;
        case vect_reduction_def:
          dump_printf (MSG_NOTE, "reduction\n");
          break;
        case vect_double_reduction_def:
          dump_printf (MSG_NOTE, "double reduction\n");
          break;
        case vect_nested_cycle:
          dump_printf (MSG_NOTE, "nested cycle\n");
          break;
        case vect_unknown_def_type:
          dump_printf (MSG_NOTE, "unknown\n");
          break;
        }
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
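/* Typical use (a sketch mirroring the callers above, illustration only):
   pass an operand of the statement under analysis and key further checks
   off DT and VECTYPE:

       gimple *def_stmt;
       enum vect_def_type dt;
       tree op_vectype;
       if (!vect_is_simple_use (rhs1, vinfo, &def_stmt, &dt, &op_vectype))
         return false;  */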
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || (TYPE_VECTOR_SUBPARTS (vectype) / 2
                == TYPE_VECTOR_SUBPARTS (wide_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type
            = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
                                       current_vector_size);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode,
                                            TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
                    == TYPE_VECTOR_SUBPARTS (wide_vectype)));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
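/* Multi-step example (illustration only, using the char->short->int case
   from the comment above): on a target with only single-step unpacks,
   widening goes through short:

       V16QI --VEC_UNPACK_LO/HI--> 2 x V8HI --VEC_UNPACK_LO/HI--> 4 x V4SI

   On success *MULTI_STEP_CVT is 1 and INTERM_TYPES holds the short
   vector type.  */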
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || (TYPE_VECTOR_SUBPARTS (vectype) * 2
                == TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type
            = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
                                       current_vector_size);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
                    == TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
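/* Multi-step example (illustration only, using the int->short->char case
   from the comment above): narrowing goes through short:

       2 x V4SI --VEC_PACK_TRUNC--> V8HI,  2 x V8HI --VEC_PACK_TRUNC--> V16QI

   with *MULTI_STEP_CVT == 1 and the short vector type in INTERM_TYPES.  */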