/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2017 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};
/* Return the vectorized type for the given statement.  */

static tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

static bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
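
/* Illustrative use only (not part of the original sources): a caller
   costing NCOPIES copies of a vector statement into the loop body would
   typically write

     unsigned inside_cost
       = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
			   stmt_info, 0, vect_body);

   i.e. the cost is either pushed onto BODY_COST_VEC for later processing
   or, when no vector is given, fed straight to the target hook via
   add_stmt_cost.  */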
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

static bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
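
/* Illustrative example (an assumption for exposition, not from the
   original sources): in

     for (i = 0; i < n; i++)
       {
	 a[i] = b[i] + x;	<-- relevant: it has a vdef (stores to memory)
	 s = s + b[i];		<-- live if s is used after the loop
       }

   the increment of i, by contrast, is used only for addressing and loop
   control, so it is left vect_unused_in_scope here.  */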
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref. FORNOW this means that its of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}
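
/* Illustrative note (exposition only): for a statement like "x = a[i]"
   the use of "i" appears only inside the array reference, so
   exist_non_indexing_operands_for_use_p is false for it and case 1 above
   applies - the definition of "i" is not marked relevant merely because
   of this use.  */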
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");
	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");
	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
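
/* Worked illustration (exposition only, assuming vect_pow2 (N) is 2**N):
   with PWR == 1 the loop above runs twice, so a two-step conversion is
   costed as a sum of two power-of-two counts of vec_promote_demote
   operations (e.g. 1 + 2 = 3, or 2 + 4 = 6 in the promotion case),
   rather than as a single operation.  */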
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       enum vect_def_type dt, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    /* N scalar stores plus extracting the elements.  */
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     scalar_store, stmt_info, 0, vect_body);
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
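
/* Worked illustration (exposition only): for a grouped store implemented
   by permute-and-store (VMAT_CONTIGUOUS_PERMUTE) with group_size == 4 and
   ncopies == 1, the formula above gives nstmts = 1 * ceil_log2 (4) * 4 = 8
   vec_perm operations on top of the stores themselves.  */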
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push sth to a SSA name with initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
   To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

   To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

   For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}
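
/* Illustrative example (exposition only): a call to sqrtf in the loop has
   combined function CFN_SQRT; with a V4SF vector type as both VECTYPE_OUT
   and VECTYPE_IN this returns IFN_SQRT whenever the target advertises the
   corresponding direct optab via direct_internal_fn_supported_p, and
   IFN_LAST otherwise.  */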
static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  auto_vec_perm_indices sel (nunits);
  for (i = 0; i < nunits; ++i)
    sel.quick_push (nunits - 1 - i);

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, &sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
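
/* Illustrative example (exposition only): for a four-element vector type
   the loop above builds the selector { 3, 2, 1, 0 }, i.e. the permutation
   that reverses the vector, and the mask is returned only if
   can_vec_perm_p says the target can actually perform it.  */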
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
			   vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
			   && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
	  if (nunits % group_size == 0)
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
	  if (overrun_p
	      && gap < (vect_known_alignment_in_bytes (first_dr)
			/ vect_get_scalar_dr_size (first_dr)))
	    overrun_p = false;
	  if (overrun_p && !can_overrun_p)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }
	  *memory_access_type = VMAT_CONTIGUOUS;
	}
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
	 but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
	 would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* An overrun is fine if the trailing elements are smaller than the
	 alignment boundary B.  Every vector access will be a multiple of B
	 and so we are guaranteed to access a non-gap element in the
	 same B-sized block.  */
      if (would_overrun_p
	  && gap < (vect_known_alignment_in_bytes (first_dr)
		    / vect_get_scalar_dr_size (first_dr)))
	would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
	  && (can_overrun_p || !would_overrun_p)
	  && compare_step_with_zero (stmt) > 0)
	{
	  /* First try using LOAD/STORE_LANES.  */
	  if (vls_type == VLS_LOAD
	      ? vect_load_lanes_supported (vectype, group_size)
	      : vect_store_lanes_supported (vectype, group_size))
	    {
	      *memory_access_type = VMAT_LOAD_STORE_LANES;
	      overrun_p = would_overrun_p;
	    }

	  /* If that fails, try using permuting loads.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_grouped_load_supported (vectype, single_element_p,
						 group_size)
		  : vect_grouped_store_supported (vectype, group_size)))
	    {
	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
	      overrun_p = would_overrun_p;
	    }
	}
    }

  if (vls_type != VLS_LOAD && first_stmt == stmt)
    {
      /* STMT is the leader of the group.  Check the operands of all the
	 stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt)
	{
	  gcc_assert (gimple_assign_single_p (next_stmt));
	  tree op = gimple_assign_rhs1 (next_stmt);
	  gimple *def_stmt;
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
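
/* Illustrative example (exposition only): for a group of loads with
   group_size == 3 and a gap of 1 at the end, reading a full vector may
   touch one element past the last scalar access.  That overrun is
   acceptable either when the trailing elements fit below the alignment
   boundary (the gap test against vect_known_alignment_in_bytes above) or
   when the loop can peel a final scalar iteration
   (LOOP_VINFO_PEELING_FOR_GAPS).  */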
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
/* Analyze load or store statement STMT of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (gimple *stmt, tree vectype, bool slp,
		     vec_load_store_type vls_type, unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      gimple *def_stmt;
      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
				      memory_access_type))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      *memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }
  return true;
}
/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  gather_scatter_info gs_info;
  vec_load_store_type vls_type;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = vect_get_num_copies (loop_vinfo, vectype);
  gcc_assert (ncopies >= 1);

  mask = gimple_call_arg (stmt, 2);

  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    return false;

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
      || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
	return false;
      if (dt == vect_constant_def || dt == vect_external_def)
	vls_type = VLS_STORE_INVARIANT;
      else
	vls_type = VLS_STORE;
    }
  else
    vls_type = VLS_LOAD;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree masktype
	= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "masked gather with integer mask not supported.");
	  return false;
	}
    }
  else if (memory_access_type != VMAT_CONTIGUOUS)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported access type for masked %s.\n",
			 vls_type == VLS_LOAD ? "load" : "store");
      return false;
    }
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
	   || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
					  TYPE_MODE (mask_vectype),
					  vls_type == VLS_LOAD)
	   || (rhs_vectype
	       && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vls_type == VLS_LOAD)
	vect_model_load_cost (stmt_info, ncopies, memory_access_type,
			      NULL, NULL);
      else
	vect_model_store_cost (stmt_info, ncopies, memory_access_type,
			       dt, NULL, NULL, NULL);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
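  /* Transform.  For gathers and scatters the offset vector of the target
     builtin may have a different number of elements than VECTYPE; the
     modifier computed below (WIDEN, NONE or NARROW) records how the
     offsets, mask and result are permuted so that the element counts
     match.  */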
2151 if (memory_access_type
== VMAT_GATHER_SCATTER
)
2153 tree vec_oprnd0
= NULL_TREE
, op
;
2154 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
2155 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
2156 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
2157 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
2158 tree mask_perm_mask
= NULL_TREE
;
2159 edge pe
= loop_preheader_edge (loop
);
2162 enum { NARROW
, NONE
, WIDEN
} modifier
;
2163 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
2165 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
2166 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2167 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2168 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2169 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2170 scaletype
= TREE_VALUE (arglist
);
2171 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
2172 && types_compatible_p (srctype
, masktype
));
2174 if (nunits
== gather_off_nunits
)
2176 else if (nunits
== gather_off_nunits
/ 2)
2180 auto_vec_perm_indices
sel (gather_off_nunits
);
2181 for (i
= 0; i
< gather_off_nunits
; ++i
)
2182 sel
.quick_push (i
| nunits
);
2184 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
, sel
);
2186 else if (nunits
== gather_off_nunits
* 2)
2190 auto_vec_perm_indices
sel (nunits
);
2191 sel
.quick_grow (nunits
);
2192 for (i
= 0; i
< nunits
; ++i
)
2193 sel
[i
] = i
< gather_off_nunits
2194 ? i
: i
+ nunits
- gather_off_nunits
;
2196 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
2198 for (i
= 0; i
< nunits
; ++i
)
2199 sel
[i
] = i
| gather_off_nunits
;
2200 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
2205 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
2207 ptr
= fold_convert (ptrtype
, gs_info
.base
);
2208 if (!is_gimple_min_invariant (ptr
))
2210 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
2211 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
2212 gcc_assert (!new_bb
);
2215 scale
= build_int_cst (scaletype
, gs_info
.scale
);
2217 prev_stmt_info
= NULL
;
2218 for (j
= 0; j
< ncopies
; ++j
)
2220 if (modifier
== WIDEN
&& (j
& 1))
2221 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
2222 perm_mask
, stmt
, gsi
);
2225 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
2228 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
, vec_oprnd0
);
2230 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
2232 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
2233 == TYPE_VECTOR_SUBPARTS (idxtype
));
2234 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
2235 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
2237 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2238 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2242 if (mask_perm_mask
&& (j
& 1))
2243 mask_op
= permute_vec_elements (mask_op
, mask_op
,
2244 mask_perm_mask
, stmt
, gsi
);
2248 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
2251 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2252 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2256 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
2258 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
2259 == TYPE_VECTOR_SUBPARTS (masktype
));
2260 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
2261 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
2263 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
2264 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2270 = gimple_build_call (gs_info
.decl
, 5, mask_op
, ptr
, op
, mask_op
,
2273 if (!useless_type_conversion_p (vectype
, rettype
))
2275 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
2276 == TYPE_VECTOR_SUBPARTS (rettype
));
2277 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
2278 gimple_call_set_lhs (new_stmt
, op
);
2279 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2280 var
= make_ssa_name (vec_dest
);
2281 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
2282 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2286 var
= make_ssa_name (vec_dest
, new_stmt
);
2287 gimple_call_set_lhs (new_stmt
, var
);
2290 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2292 if (modifier
== NARROW
)
2299 var
= permute_vec_elements (prev_res
, var
,
2300 perm_mask
, stmt
, gsi
);
2301 new_stmt
= SSA_NAME_DEF_STMT (var
);
2304 if (prev_stmt_info
== NULL
)
2305 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2307 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2308 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2311 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
2313 if (STMT_VINFO_RELATED_STMT (stmt_info
))
2315 stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
2316 stmt_info
= vinfo_for_stmt (stmt
);
2318 tree lhs
= gimple_call_lhs (stmt
);
2319 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
2320 set_vinfo_for_stmt (new_stmt
, stmt_info
);
2321 set_vinfo_for_stmt (stmt
, NULL
);
2322 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
2323 gsi_replace (gsi
, new_stmt
, true);
2326 else if (vls_type
!= VLS_LOAD
)
2328 tree vec_rhs
= NULL_TREE
, vec_mask
= NULL_TREE
;
2329 prev_stmt_info
= NULL
;
2330 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo
) = true;
2331 for (i
= 0; i
< ncopies
; i
++)
2333 unsigned align
, misalign
;
2337 tree rhs
= gimple_call_arg (stmt
, 3);
2338 vec_rhs
= vect_get_vec_def_for_operand (rhs
, stmt
);
2339 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
,
	  /* We should have caught mismatched types earlier.  */
2342 gcc_assert (useless_type_conversion_p (vectype
,
2343 TREE_TYPE (vec_rhs
)));
2344 dataref_ptr
= vect_create_data_ref_ptr (stmt
, vectype
, NULL
,
2345 NULL_TREE
, &dummy
, gsi
,
2346 &ptr_incr
, false, &inv_p
);
2347 gcc_assert (!inv_p
);
2351 vect_is_simple_use (vec_rhs
, loop_vinfo
, &def_stmt
, &dt
);
2352 vec_rhs
= vect_get_vec_def_for_stmt_copy (dt
, vec_rhs
);
2353 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2354 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2355 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
2356 TYPE_SIZE_UNIT (vectype
));
2359 align
= DR_TARGET_ALIGNMENT (dr
);
2360 if (aligned_access_p (dr
))
2362 else if (DR_MISALIGNMENT (dr
) == -1)
2364 align
= TYPE_ALIGN_UNIT (elem_type
);
2368 misalign
= DR_MISALIGNMENT (dr
);
2369 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
2371 tree ptr
= build_int_cst (TREE_TYPE (gimple_call_arg (stmt
, 1)),
2372 misalign
? least_bit_hwi (misalign
) : align
);
2374 = gimple_build_call_internal (IFN_MASK_STORE
, 4, dataref_ptr
,
2375 ptr
, vec_mask
, vec_rhs
);
2376 gimple_call_set_nothrow (call
, true);
2378 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2380 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2382 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2383 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2388 tree vec_mask
= NULL_TREE
;
2389 prev_stmt_info
= NULL
;
2390 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
2391 for (i
= 0; i
< ncopies
; i
++)
2393 unsigned align
, misalign
;
2397 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
,
2399 dataref_ptr
= vect_create_data_ref_ptr (stmt
, vectype
, NULL
,
2400 NULL_TREE
, &dummy
, gsi
,
2401 &ptr_incr
, false, &inv_p
);
2402 gcc_assert (!inv_p
);
2406 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2407 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2408 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
2409 TYPE_SIZE_UNIT (vectype
));
2412 align
= DR_TARGET_ALIGNMENT (dr
);
2413 if (aligned_access_p (dr
))
2415 else if (DR_MISALIGNMENT (dr
) == -1)
2417 align
= TYPE_ALIGN_UNIT (elem_type
);
2421 misalign
= DR_MISALIGNMENT (dr
);
2422 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
2424 tree ptr
= build_int_cst (TREE_TYPE (gimple_call_arg (stmt
, 1)),
2425 misalign
? least_bit_hwi (misalign
) : align
);
2427 = gimple_build_call_internal (IFN_MASK_LOAD
, 3, dataref_ptr
,
2429 gimple_call_set_lhs (call
, make_ssa_name (vec_dest
));
2430 gimple_call_set_nothrow (call
, true);
2431 vect_finish_stmt_generation (stmt
, call
, gsi
);
2433 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= call
;
2435 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = call
;
2436 prev_stmt_info
= vinfo_for_stmt (call
);
  if (vls_type == VLS_LOAD)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}.  */

static bool
vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node,
		    tree vectype_in, enum vect_def_type *dt)
{
  tree op, vectype;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  unsigned ncopies, nunits;

  op = gimple_call_arg (stmt, 0);
  vectype = STMT_VINFO_VECTYPE (stmt_info);
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
  if (! char_vectype)
    return false;

  unsigned int num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
  unsigned word_bytes = num_bytes / nunits;

  auto_vec_perm_indices elts (num_bytes);
  for (unsigned i = 0; i < nunits; ++i)
    for (unsigned j = 0; j < word_bytes; ++j)
      elts.quick_push ((i + 1) * word_bytes - j - 1);

  if (! can_vec_perm_p (TYPE_MODE (char_vectype), false, &elts))
    return false;

  if (! vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
			 "\n");
      if (! PURE_SLP_STMT (stmt_info))
	{
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 1, vector_stmt, stmt_info, 0, vect_prologue);
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 ncopies, vec_perm, stmt_info, 0, vect_body);
	}
      return true;
    }

  auto_vec<tree, 32> telts (num_bytes);
  for (unsigned i = 0; i < num_bytes; ++i)
    telts.quick_push (build_int_cst (char_type_node, elts[i]));
  tree bswap_vconst = build_vector (char_vectype, telts);

  /* Transform.  */
  vec<tree> vec_oprnds = vNULL;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  for (unsigned j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      unsigned i;
      tree vop;
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  tree tem = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       char_vectype, vop));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tree tem2 = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
					  tem, tem, bswap_vconst);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tem = make_ssa_name (vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       vectype, tem2));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
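/* For instance, for a BSWAP32 on a V4SI vector the selector built above
   (with NUNITS == 4 and WORD_BYTES == 4) is

     { 3, 2, 1, 0,  7, 6, 5, 4,  11, 10, 9, 8,  15, 14, 13, 12 }

   i.e. the bytes of each 32-bit word are reversed in place by the single
   VEC_PERM_EXPR on the V16QI view of the operand.  */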
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
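/* For example, narrowing a V4SI input to a V8HI output can usually be done
   in one step, so *CONVERT_CODE would typically be set to
   VEC_PACK_TRUNC_EXPR; a V2DI -> V16QI narrowing needs several steps and
   therefore fails the MULTI_STEP_CVT check above.  */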
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
2611 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
2612 stmt_vec_info stmt_info
= vinfo_for_stmt (gs
), prev_stmt_info
;
2613 tree vectype_out
, vectype_in
;
2616 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2617 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
2618 vec_info
*vinfo
= stmt_info
->vinfo
;
2619 tree fndecl
, new_temp
, rhs_type
;
2621 enum vect_def_type dt
[3]
2622 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
2624 gimple
*new_stmt
= NULL
;
2626 vec
<tree
> vargs
= vNULL
;
2627 enum { NARROW
, NONE
, WIDEN
} modifier
;
2631 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
2634 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
2638 /* Is GS a vectorizable call? */
2639 stmt
= dyn_cast
<gcall
*> (gs
);
2643 if (gimple_call_internal_p (stmt
)
2644 && (gimple_call_internal_fn (stmt
) == IFN_MASK_LOAD
2645 || gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
))
2646 return vectorizable_mask_load_store (stmt
, gsi
, vec_stmt
,
2649 if (gimple_call_lhs (stmt
) == NULL_TREE
2650 || TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
2653 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
2655 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
2657 /* Process function arguments. */
2658 rhs_type
= NULL_TREE
;
2659 vectype_in
= NULL_TREE
;
2660 nargs
= gimple_call_num_args (stmt
);
2662 /* Bail out if the function has more than three arguments, we do not have
2663 interesting builtin functions to vectorize with more than two arguments
2664 except for fma. No arguments is also not good. */
2665 if (nargs
== 0 || nargs
> 3)
2668 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
2669 if (gimple_call_internal_p (stmt
)
2670 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
2673 rhs_type
= unsigned_type_node
;
2676 for (i
= 0; i
< nargs
; i
++)
2680 op
= gimple_call_arg (stmt
, i
);
2682 /* We can only handle calls with arguments of the same type. */
2684 && !types_compatible_p (rhs_type
, TREE_TYPE (op
)))
2686 if (dump_enabled_p ())
2687 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2688 "argument types differ.\n");
2692 rhs_type
= TREE_TYPE (op
);
2694 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[i
], &opvectype
))
2696 if (dump_enabled_p ())
2697 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2698 "use not simple.\n");
2703 vectype_in
= opvectype
;
2705 && opvectype
!= vectype_in
)
2707 if (dump_enabled_p ())
2708 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2709 "argument vector types differ.\n");
2713 /* If all arguments are external or constant defs use a vector type with
2714 the same size as the output vector type. */
2716 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
2718 gcc_assert (vectype_in
);
2721 if (dump_enabled_p ())
2723 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2724 "no vectype for scalar type ");
2725 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
2726 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
2733 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
2734 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
2735 if (nunits_in
== nunits_out
/ 2)
2737 else if (nunits_out
== nunits_in
)
2739 else if (nunits_out
== nunits_in
/ 2)
2744 /* We only handle functions that do not read or clobber memory. */
2745 if (gimple_vuse (stmt
))
2747 if (dump_enabled_p ())
2748 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2749 "function reads from or writes to memory.\n");
2753 /* For now, we only vectorize functions if a target specific builtin
2754 is available. TODO -- in some cases, it might be profitable to
2755 insert the calls for pieces of the vector, in order to be able
2756 to vectorize other operations in the loop. */
2758 internal_fn ifn
= IFN_LAST
;
2759 combined_fn cfn
= gimple_call_combined_fn (stmt
);
2760 tree callee
= gimple_call_fndecl (stmt
);
2762 /* First try using an internal function. */
2763 tree_code convert_code
= ERROR_MARK
;
2765 && (modifier
== NONE
2766 || (modifier
== NARROW
2767 && simple_integer_narrowing (vectype_out
, vectype_in
,
2769 ifn
= vectorizable_internal_function (cfn
, callee
, vectype_out
,
2772 /* If that fails, try asking for a target-specific built-in function. */
2773 if (ifn
== IFN_LAST
)
2775 if (cfn
!= CFN_LAST
)
2776 fndecl
= targetm
.vectorize
.builtin_vectorized_function
2777 (cfn
, vectype_out
, vectype_in
);
2779 fndecl
= targetm
.vectorize
.builtin_md_vectorized_function
2780 (callee
, vectype_out
, vectype_in
);
2783 if (ifn
== IFN_LAST
&& !fndecl
)
2785 if (cfn
== CFN_GOMP_SIMD_LANE
2788 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
2789 && TREE_CODE (gimple_call_arg (stmt
, 0)) == SSA_NAME
2790 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
2791 == SSA_NAME_VAR (gimple_call_arg (stmt
, 0)))
2793 /* We can handle IFN_GOMP_SIMD_LANE by returning a
2794 { 0, 1, 2, ... vf - 1 } vector. */
2795 gcc_assert (nargs
== 0);
2797 else if (modifier
== NONE
2798 && (gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP16
)
2799 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP32
)
2800 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP64
)))
2801 return vectorizable_bswap (stmt
, gsi
, vec_stmt
, slp_node
,
2805 if (dump_enabled_p ())
2806 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2807 "function is not vectorizable.\n");
2814 else if (modifier
== NARROW
&& ifn
== IFN_LAST
)
2815 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_out
);
2817 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
2819 /* Sanity check: make sure that at least one copy of the vectorized stmt
2820 needs to be generated. */
2821 gcc_assert (ncopies
>= 1);
2823 if (!vec_stmt
) /* transformation not required. */
2825 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2826 if (dump_enabled_p ())
2827 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_call ==="
2829 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
2830 if (ifn
!= IFN_LAST
&& modifier
== NARROW
&& !slp_node
)
2831 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
, ncopies
/ 2,
2832 vec_promote_demote
, stmt_info
, 0, vect_body
);
2839 if (dump_enabled_p ())
2840 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
2843 scalar_dest
= gimple_call_lhs (stmt
);
2844 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
2846 prev_stmt_info
= NULL
;
2847 if (modifier
== NONE
|| ifn
!= IFN_LAST
)
2849 tree prev_res
= NULL_TREE
;
2850 for (j
= 0; j
< ncopies
; ++j
)
2852 /* Build argument list for the vectorized call. */
2854 vargs
.create (nargs
);
2860 auto_vec
<vec
<tree
> > vec_defs (nargs
);
2861 vec
<tree
> vec_oprnds0
;
2863 for (i
= 0; i
< nargs
; i
++)
2864 vargs
.quick_push (gimple_call_arg (stmt
, i
));
2865 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
2866 vec_oprnds0
= vec_defs
[0];
2868 /* Arguments are ready. Create the new vector stmt. */
2869 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_oprnd0
)
2872 for (k
= 0; k
< nargs
; k
++)
2874 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
2875 vargs
[k
] = vec_oprndsk
[i
];
2877 if (modifier
== NARROW
)
2879 tree half_res
= make_ssa_name (vectype_in
);
2881 = gimple_build_call_internal_vec (ifn
, vargs
);
2882 gimple_call_set_lhs (call
, half_res
);
2883 gimple_call_set_nothrow (call
, true);
2885 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2888 prev_res
= half_res
;
2891 new_temp
= make_ssa_name (vec_dest
);
2892 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
2893 prev_res
, half_res
);
2898 if (ifn
!= IFN_LAST
)
2899 call
= gimple_build_call_internal_vec (ifn
, vargs
);
2901 call
= gimple_build_call_vec (fndecl
, vargs
);
2902 new_temp
= make_ssa_name (vec_dest
, call
);
2903 gimple_call_set_lhs (call
, new_temp
);
2904 gimple_call_set_nothrow (call
, true);
2907 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2908 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2911 for (i
= 0; i
< nargs
; i
++)
2913 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
2914 vec_oprndsi
.release ();
2919 for (i
= 0; i
< nargs
; i
++)
2921 op
= gimple_call_arg (stmt
, i
);
2924 = vect_get_vec_def_for_operand (op
, stmt
);
2927 vec_oprnd0
= gimple_call_arg (new_stmt
, i
);
2929 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
2932 vargs
.quick_push (vec_oprnd0
);
2935 if (gimple_call_internal_p (stmt
)
2936 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
2938 auto_vec
<tree
, 32> v (nunits_out
);
2939 for (int k
= 0; k
< nunits_out
; ++k
)
2940 v
.quick_push (build_int_cst (unsigned_type_node
,
2941 j
* nunits_out
+ k
));
2942 tree cst
= build_vector (vectype_out
, v
);
2944 = vect_get_new_ssa_name (vectype_out
, vect_simple_var
, "cst_");
2945 gimple
*init_stmt
= gimple_build_assign (new_var
, cst
);
2946 vect_init_vector_1 (stmt
, init_stmt
, NULL
);
2947 new_temp
= make_ssa_name (vec_dest
);
2948 new_stmt
= gimple_build_assign (new_temp
, new_var
);
2950 else if (modifier
== NARROW
)
2952 tree half_res
= make_ssa_name (vectype_in
);
2953 gcall
*call
= gimple_build_call_internal_vec (ifn
, vargs
);
2954 gimple_call_set_lhs (call
, half_res
);
2955 gimple_call_set_nothrow (call
, true);
2957 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2960 prev_res
= half_res
;
2963 new_temp
= make_ssa_name (vec_dest
);
2964 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
2965 prev_res
, half_res
);
2970 if (ifn
!= IFN_LAST
)
2971 call
= gimple_build_call_internal_vec (ifn
, vargs
);
2973 call
= gimple_build_call_vec (fndecl
, vargs
);
2974 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2975 gimple_call_set_lhs (call
, new_temp
);
2976 gimple_call_set_nothrow (call
, true);
2979 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2981 if (j
== (modifier
== NARROW
? 1 : 0))
2982 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2984 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2986 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2989 else if (modifier
== NARROW
)
2991 for (j
= 0; j
< ncopies
; ++j
)
2993 /* Build argument list for the vectorized call. */
2995 vargs
.create (nargs
* 2);
3001 auto_vec
<vec
<tree
> > vec_defs (nargs
);
3002 vec
<tree
> vec_oprnds0
;
3004 for (i
= 0; i
< nargs
; i
++)
3005 vargs
.quick_push (gimple_call_arg (stmt
, i
));
3006 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3007 vec_oprnds0
= vec_defs
[0];
3009 /* Arguments are ready. Create the new vector stmt. */
3010 for (i
= 0; vec_oprnds0
.iterate (i
, &vec_oprnd0
); i
+= 2)
3014 for (k
= 0; k
< nargs
; k
++)
3016 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3017 vargs
.quick_push (vec_oprndsk
[i
]);
3018 vargs
.quick_push (vec_oprndsk
[i
+ 1]);
3021 if (ifn
!= IFN_LAST
)
3022 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3024 call
= gimple_build_call_vec (fndecl
, vargs
);
3025 new_temp
= make_ssa_name (vec_dest
, call
);
3026 gimple_call_set_lhs (call
, new_temp
);
3027 gimple_call_set_nothrow (call
, true);
3029 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3030 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3033 for (i
= 0; i
< nargs
; i
++)
3035 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3036 vec_oprndsi
.release ();
3041 for (i
= 0; i
< nargs
; i
++)
3043 op
= gimple_call_arg (stmt
, i
);
3047 = vect_get_vec_def_for_operand (op
, stmt
);
3049 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3053 vec_oprnd1
= gimple_call_arg (new_stmt
, 2*i
+ 1);
3055 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd1
);
3057 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3060 vargs
.quick_push (vec_oprnd0
);
3061 vargs
.quick_push (vec_oprnd1
);
3064 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3065 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3066 gimple_call_set_lhs (new_stmt
, new_temp
);
3067 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3070 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
3072 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3074 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  HOST_WIDE_INT linear_step;
  enum vect_def_type dt;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   *ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
3127 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
3129 if (!is_gimple_assign (def_stmt
)
3130 || gimple_assign_rhs_code (def_stmt
) != POINTER_PLUS_EXPR
3131 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt
)))
3134 tree base
= gimple_assign_rhs1 (def_stmt
);
3135 HOST_WIDE_INT linear_step
= 0;
3136 tree v
= gimple_assign_rhs2 (def_stmt
);
3137 while (TREE_CODE (v
) == SSA_NAME
)
3140 def_stmt
= SSA_NAME_DEF_STMT (v
);
3141 if (is_gimple_assign (def_stmt
))
3142 switch (gimple_assign_rhs_code (def_stmt
))
3145 t
= gimple_assign_rhs2 (def_stmt
);
3146 if (linear_step
|| TREE_CODE (t
) != INTEGER_CST
)
3148 base
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (base
), base
, t
);
3149 v
= gimple_assign_rhs1 (def_stmt
);
3152 t
= gimple_assign_rhs2 (def_stmt
);
3153 if (linear_step
|| !tree_fits_shwi_p (t
) || integer_zerop (t
))
3155 linear_step
= tree_to_shwi (t
);
3156 v
= gimple_assign_rhs1 (def_stmt
);
3159 t
= gimple_assign_rhs1 (def_stmt
);
3160 if (TREE_CODE (TREE_TYPE (t
)) != INTEGER_TYPE
3161 || (TYPE_PRECISION (TREE_TYPE (v
))
3162 < TYPE_PRECISION (TREE_TYPE (t
))))
3171 else if (gimple_call_internal_p (def_stmt
, IFN_GOMP_SIMD_LANE
)
3173 && TREE_CODE (gimple_call_arg (def_stmt
, 0)) == SSA_NAME
3174 && (SSA_NAME_VAR (gimple_call_arg (def_stmt
, 0))
3179 arginfo
->linear_step
= linear_step
;
3181 arginfo
->simd_lane_linear
= true;
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
3202 tree vec_oprnd0
= NULL_TREE
;
3203 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
), prev_stmt_info
;
3205 unsigned int nunits
;
3206 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3207 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3208 vec_info
*vinfo
= stmt_info
->vinfo
;
3209 struct loop
*loop
= loop_vinfo
? LOOP_VINFO_LOOP (loop_vinfo
) : NULL
;
3210 tree fndecl
, new_temp
;
3212 gimple
*new_stmt
= NULL
;
3214 auto_vec
<simd_call_arg_info
> arginfo
;
3215 vec
<tree
> vargs
= vNULL
;
3217 tree lhs
, rtype
, ratype
;
3218 vec
<constructor_elt
, va_gc
> *ret_ctor_elts
;
3220 /* Is STMT a vectorizable call? */
3221 if (!is_gimple_call (stmt
))
3224 fndecl
= gimple_call_fndecl (stmt
);
3225 if (fndecl
== NULL_TREE
)
3228 struct cgraph_node
*node
= cgraph_node::get (fndecl
);
3229 if (node
== NULL
|| node
->simd_clones
== NULL
)
3232 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3235 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3239 if (gimple_call_lhs (stmt
)
3240 && TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3243 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3245 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3247 if (loop_vinfo
&& nested_in_vect_loop_p (loop
, stmt
))
3254 /* Process function arguments. */
3255 nargs
= gimple_call_num_args (stmt
);
3257 /* Bail out if the function has zero arguments. */
3261 arginfo
.reserve (nargs
, true);
3263 for (i
= 0; i
< nargs
; i
++)
3265 simd_call_arg_info thisarginfo
;
3268 thisarginfo
.linear_step
= 0;
3269 thisarginfo
.align
= 0;
3270 thisarginfo
.op
= NULL_TREE
;
3271 thisarginfo
.simd_lane_linear
= false;
3273 op
= gimple_call_arg (stmt
, i
);
3274 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &thisarginfo
.dt
,
3275 &thisarginfo
.vectype
)
3276 || thisarginfo
.dt
== vect_uninitialized_def
)
3278 if (dump_enabled_p ())
3279 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3280 "use not simple.\n");
3284 if (thisarginfo
.dt
== vect_constant_def
3285 || thisarginfo
.dt
== vect_external_def
)
3286 gcc_assert (thisarginfo
.vectype
== NULL_TREE
);
3288 gcc_assert (thisarginfo
.vectype
!= NULL_TREE
);
3290 /* For linear arguments, the analyze phase should have saved
3291 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3292 if (i
* 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).length ()
3293 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2])
3295 gcc_assert (vec_stmt
);
3296 thisarginfo
.linear_step
3297 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2]);
3299 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 1];
3300 thisarginfo
.simd_lane_linear
3301 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 3]
3302 == boolean_true_node
);
3303 /* If loop has been peeled for alignment, we need to adjust it. */
3304 tree n1
= LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo
);
3305 tree n2
= LOOP_VINFO_NITERS (loop_vinfo
);
3306 if (n1
!= n2
&& !thisarginfo
.simd_lane_linear
)
3308 tree bias
= fold_build2 (MINUS_EXPR
, TREE_TYPE (n1
), n1
, n2
);
3309 tree step
= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2];
3310 tree opt
= TREE_TYPE (thisarginfo
.op
);
3311 bias
= fold_convert (TREE_TYPE (step
), bias
);
3312 bias
= fold_build2 (MULT_EXPR
, TREE_TYPE (step
), bias
, step
);
3314 = fold_build2 (POINTER_TYPE_P (opt
)
3315 ? POINTER_PLUS_EXPR
: PLUS_EXPR
, opt
,
3316 thisarginfo
.op
, bias
);
3320 && thisarginfo
.dt
!= vect_constant_def
3321 && thisarginfo
.dt
!= vect_external_def
3323 && TREE_CODE (op
) == SSA_NAME
3324 && simple_iv (loop
, loop_containing_stmt (stmt
), op
,
3326 && tree_fits_shwi_p (iv
.step
))
3328 thisarginfo
.linear_step
= tree_to_shwi (iv
.step
);
3329 thisarginfo
.op
= iv
.base
;
3331 else if ((thisarginfo
.dt
== vect_constant_def
3332 || thisarginfo
.dt
== vect_external_def
)
3333 && POINTER_TYPE_P (TREE_TYPE (op
)))
3334 thisarginfo
.align
= get_pointer_alignment (op
) / BITS_PER_UNIT
;
3335 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3337 if (POINTER_TYPE_P (TREE_TYPE (op
))
3338 && !thisarginfo
.linear_step
3340 && thisarginfo
.dt
!= vect_constant_def
3341 && thisarginfo
.dt
!= vect_external_def
3344 && TREE_CODE (op
) == SSA_NAME
)
3345 vect_simd_lane_linear (op
, loop
, &thisarginfo
);
3347 arginfo
.quick_push (thisarginfo
);
3350 unsigned int badness
= 0;
3351 struct cgraph_node
*bestn
= NULL
;
3352 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).exists ())
3353 bestn
= cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[0]);
3355 for (struct cgraph_node
*n
= node
->simd_clones
; n
!= NULL
;
3356 n
= n
->simdclone
->next_clone
)
3358 unsigned int this_badness
= 0;
3359 if (n
->simdclone
->simdlen
3360 > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
3361 || n
->simdclone
->nargs
!= nargs
)
3363 if (n
->simdclone
->simdlen
3364 < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo
))
3365 this_badness
+= (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo
))
3366 - exact_log2 (n
->simdclone
->simdlen
)) * 1024;
3367 if (n
->simdclone
->inbranch
)
3368 this_badness
+= 2048;
3369 int target_badness
= targetm
.simd_clone
.usable (n
);
3370 if (target_badness
< 0)
3372 this_badness
+= target_badness
* 512;
3373 /* FORNOW: Have to add code to add the mask argument. */
3374 if (n
->simdclone
->inbranch
)
3376 for (i
= 0; i
< nargs
; i
++)
3378 switch (n
->simdclone
->args
[i
].arg_type
)
3380 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3381 if (!useless_type_conversion_p
3382 (n
->simdclone
->args
[i
].orig_type
,
3383 TREE_TYPE (gimple_call_arg (stmt
, i
))))
3385 else if (arginfo
[i
].dt
== vect_constant_def
3386 || arginfo
[i
].dt
== vect_external_def
3387 || arginfo
[i
].linear_step
)
3390 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3391 if (arginfo
[i
].dt
!= vect_constant_def
3392 && arginfo
[i
].dt
!= vect_external_def
)
3395 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3396 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
3397 if (arginfo
[i
].dt
== vect_constant_def
3398 || arginfo
[i
].dt
== vect_external_def
3399 || (arginfo
[i
].linear_step
3400 != n
->simdclone
->args
[i
].linear_step
))
3403 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3404 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
3405 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
3406 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
3407 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
3408 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
3412 case SIMD_CLONE_ARG_TYPE_MASK
:
3415 if (i
== (size_t) -1)
3417 if (n
->simdclone
->args
[i
].alignment
> arginfo
[i
].align
)
3422 if (arginfo
[i
].align
)
3423 this_badness
+= (exact_log2 (arginfo
[i
].align
)
3424 - exact_log2 (n
->simdclone
->args
[i
].alignment
));
3426 if (i
== (size_t) -1)
3428 if (bestn
== NULL
|| this_badness
< badness
)
3431 badness
= this_badness
;
3438 for (i
= 0; i
< nargs
; i
++)
3439 if ((arginfo
[i
].dt
== vect_constant_def
3440 || arginfo
[i
].dt
== vect_external_def
)
3441 && bestn
->simdclone
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_VECTOR
)
3444 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt
,
3446 if (arginfo
[i
].vectype
== NULL
3447 || (TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
)
3448 > bestn
->simdclone
->simdlen
))
3452 fndecl
= bestn
->decl
;
3453 nunits
= bestn
->simdclone
->simdlen
;
3454 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
3456 /* If the function isn't const, only allow it in simd loops where user
3457 has asserted that at least nunits consecutive iterations can be
3458 performed using SIMD instructions. */
3459 if ((loop
== NULL
|| (unsigned) loop
->safelen
< nunits
)
3460 && gimple_vuse (stmt
))
3463 /* Sanity check: make sure that at least one copy of the vectorized stmt
3464 needs to be generated. */
3465 gcc_assert (ncopies
>= 1);
3467 if (!vec_stmt
) /* transformation not required. */
3469 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (bestn
->decl
);
3470 for (i
= 0; i
< nargs
; i
++)
3471 if ((bestn
->simdclone
->args
[i
].arg_type
3472 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
3473 || (bestn
->simdclone
->args
[i
].arg_type
3474 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
))
3476 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_grow_cleared (i
* 3
3478 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (arginfo
[i
].op
);
3479 tree lst
= POINTER_TYPE_P (TREE_TYPE (arginfo
[i
].op
))
3480 ? size_type_node
: TREE_TYPE (arginfo
[i
].op
);
3481 tree ls
= build_int_cst (lst
, arginfo
[i
].linear_step
);
3482 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (ls
);
3483 tree sll
= arginfo
[i
].simd_lane_linear
3484 ? boolean_true_node
: boolean_false_node
;
3485 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (sll
);
3487 STMT_VINFO_TYPE (stmt_info
) = call_simd_clone_vec_info_type
;
3488 if (dump_enabled_p ())
3489 dump_printf_loc (MSG_NOTE
, vect_location
,
3490 "=== vectorizable_simd_clone_call ===\n");
3491 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3497 if (dump_enabled_p ())
3498 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
3501 scalar_dest
= gimple_call_lhs (stmt
);
3502 vec_dest
= NULL_TREE
;
3507 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
3508 rtype
= TREE_TYPE (TREE_TYPE (fndecl
));
3509 if (TREE_CODE (rtype
) == ARRAY_TYPE
)
3512 rtype
= TREE_TYPE (ratype
);
3516 prev_stmt_info
= NULL
;
3517 for (j
= 0; j
< ncopies
; ++j
)
3519 /* Build argument list for the vectorized call. */
3521 vargs
.create (nargs
);
3525 for (i
= 0; i
< nargs
; i
++)
3527 unsigned int k
, l
, m
, o
;
3529 op
= gimple_call_arg (stmt
, i
);
3530 switch (bestn
->simdclone
->args
[i
].arg_type
)
3532 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3533 atype
= bestn
->simdclone
->args
[i
].vector_type
;
3534 o
= nunits
/ TYPE_VECTOR_SUBPARTS (atype
);
3535 for (m
= j
* o
; m
< (j
+ 1) * o
; m
++)
3537 if (TYPE_VECTOR_SUBPARTS (atype
)
3538 < TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
))
3540 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (atype
));
3541 k
= (TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
)
3542 / TYPE_VECTOR_SUBPARTS (atype
));
3543 gcc_assert ((k
& (k
- 1)) == 0);
3546 = vect_get_vec_def_for_operand (op
, stmt
);
3549 vec_oprnd0
= arginfo
[i
].op
;
3550 if ((m
& (k
- 1)) == 0)
3552 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3555 arginfo
[i
].op
= vec_oprnd0
;
3557 = build3 (BIT_FIELD_REF
, atype
, vec_oprnd0
,
3559 bitsize_int ((m
& (k
- 1)) * prec
));
3561 = gimple_build_assign (make_ssa_name (atype
),
3563 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3564 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3568 k
= (TYPE_VECTOR_SUBPARTS (atype
)
3569 / TYPE_VECTOR_SUBPARTS (arginfo
[i
].vectype
));
3570 gcc_assert ((k
& (k
- 1)) == 0);
3571 vec
<constructor_elt
, va_gc
> *ctor_elts
;
3573 vec_alloc (ctor_elts
, k
);
3576 for (l
= 0; l
< k
; l
++)
3578 if (m
== 0 && l
== 0)
3580 = vect_get_vec_def_for_operand (op
, stmt
);
3583 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3585 arginfo
[i
].op
= vec_oprnd0
;
3588 CONSTRUCTOR_APPEND_ELT (ctor_elts
, NULL_TREE
,
3592 vargs
.safe_push (vec_oprnd0
);
3595 vec_oprnd0
= build_constructor (atype
, ctor_elts
);
3597 = gimple_build_assign (make_ssa_name (atype
),
3599 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3600 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3605 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3606 vargs
.safe_push (op
);
3608 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3609 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
3614 = force_gimple_operand (arginfo
[i
].op
, &stmts
, true,
3619 edge pe
= loop_preheader_edge (loop
);
3620 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
3621 gcc_assert (!new_bb
);
3623 if (arginfo
[i
].simd_lane_linear
)
3625 vargs
.safe_push (arginfo
[i
].op
);
3628 tree phi_res
= copy_ssa_name (op
);
3629 gphi
*new_phi
= create_phi_node (phi_res
, loop
->header
);
3630 set_vinfo_for_stmt (new_phi
,
3631 new_stmt_vec_info (new_phi
, loop_vinfo
));
3632 add_phi_arg (new_phi
, arginfo
[i
].op
,
3633 loop_preheader_edge (loop
), UNKNOWN_LOCATION
);
3635 = POINTER_TYPE_P (TREE_TYPE (op
))
3636 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3637 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3638 ? sizetype
: TREE_TYPE (op
);
3640 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3642 tree tcst
= wide_int_to_tree (type
, cst
);
3643 tree phi_arg
= copy_ssa_name (op
);
3645 = gimple_build_assign (phi_arg
, code
, phi_res
, tcst
);
3646 gimple_stmt_iterator si
= gsi_after_labels (loop
->header
);
3647 gsi_insert_after (&si
, new_stmt
, GSI_NEW_STMT
);
3648 set_vinfo_for_stmt (new_stmt
,
3649 new_stmt_vec_info (new_stmt
, loop_vinfo
));
3650 add_phi_arg (new_phi
, phi_arg
, loop_latch_edge (loop
),
3652 arginfo
[i
].op
= phi_res
;
3653 vargs
.safe_push (phi_res
);
3658 = POINTER_TYPE_P (TREE_TYPE (op
))
3659 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
3660 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
3661 ? sizetype
: TREE_TYPE (op
);
3663 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
3665 tree tcst
= wide_int_to_tree (type
, cst
);
3666 new_temp
= make_ssa_name (TREE_TYPE (op
));
3667 new_stmt
= gimple_build_assign (new_temp
, code
,
3668 arginfo
[i
].op
, tcst
);
3669 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3670 vargs
.safe_push (new_temp
);
3673 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
3674 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
3675 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3676 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
3677 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
3678 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
3684 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3687 gcc_assert (ratype
|| TYPE_VECTOR_SUBPARTS (rtype
) == nunits
);
3689 new_temp
= create_tmp_var (ratype
);
3690 else if (TYPE_VECTOR_SUBPARTS (vectype
)
3691 == TYPE_VECTOR_SUBPARTS (rtype
))
3692 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3694 new_temp
= make_ssa_name (rtype
, new_stmt
);
3695 gimple_call_set_lhs (new_stmt
, new_temp
);
3697 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3701 if (TYPE_VECTOR_SUBPARTS (vectype
) < nunits
)
3704 unsigned int prec
= GET_MODE_BITSIZE (TYPE_MODE (vectype
));
3705 k
= nunits
/ TYPE_VECTOR_SUBPARTS (vectype
);
3706 gcc_assert ((k
& (k
- 1)) == 0);
3707 for (l
= 0; l
< k
; l
++)
3712 t
= build_fold_addr_expr (new_temp
);
3713 t
= build2 (MEM_REF
, vectype
, t
,
3714 build_int_cst (TREE_TYPE (t
),
3715 l
* prec
/ BITS_PER_UNIT
));
3718 t
= build3 (BIT_FIELD_REF
, vectype
, new_temp
,
3719 bitsize_int (prec
), bitsize_int (l
* prec
));
3721 = gimple_build_assign (make_ssa_name (vectype
), t
);
3722 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3723 if (j
== 0 && l
== 0)
3724 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3726 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3728 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3733 tree clobber
= build_constructor (ratype
, NULL
);
3734 TREE_THIS_VOLATILE (clobber
) = 1;
3735 new_stmt
= gimple_build_assign (new_temp
, clobber
);
3736 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3740 else if (TYPE_VECTOR_SUBPARTS (vectype
) > nunits
)
3742 unsigned int k
= (TYPE_VECTOR_SUBPARTS (vectype
)
3743 / TYPE_VECTOR_SUBPARTS (rtype
));
3744 gcc_assert ((k
& (k
- 1)) == 0);
3745 if ((j
& (k
- 1)) == 0)
3746 vec_alloc (ret_ctor_elts
, k
);
3749 unsigned int m
, o
= nunits
/ TYPE_VECTOR_SUBPARTS (rtype
);
3750 for (m
= 0; m
< o
; m
++)
3752 tree tem
= build4 (ARRAY_REF
, rtype
, new_temp
,
3753 size_int (m
), NULL_TREE
, NULL_TREE
);
3755 = gimple_build_assign (make_ssa_name (rtype
), tem
);
3756 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3757 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
,
3758 gimple_assign_lhs (new_stmt
));
3760 tree clobber
= build_constructor (ratype
, NULL
);
3761 TREE_THIS_VOLATILE (clobber
) = 1;
3762 new_stmt
= gimple_build_assign (new_temp
, clobber
);
3763 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3766 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
, new_temp
);
3767 if ((j
& (k
- 1)) != k
- 1)
3769 vec_oprnd0
= build_constructor (vectype
, ret_ctor_elts
);
3771 = gimple_build_assign (make_ssa_name (vec_dest
), vec_oprnd0
);
3772 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3774 if ((unsigned) j
== k
- 1)
3775 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3777 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3779 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3784 tree t
= build_fold_addr_expr (new_temp
);
3785 t
= build2 (MEM_REF
, vectype
, t
,
3786 build_int_cst (TREE_TYPE (t
), 0));
3788 = gimple_build_assign (make_ssa_name (vec_dest
), t
);
3789 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3790 tree clobber
= build_constructor (ratype
, NULL
);
3791 TREE_THIS_VOLATILE (clobber
) = 1;
3792 vect_finish_stmt_generation (stmt
,
3793 gimple_build_assign (new_temp
,
3799 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3801 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3803 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
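/* For example, widening a V8HI operand to V4SI results is done by calling
   this function twice, typically with VEC_UNPACK_LO_EXPR and
   VEC_UNPACK_HI_EXPR (or two target builtins), each call producing one
   V4SI half of the widened value.  */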
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND). See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
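/* For instance, with MULTI_STEP_CVT == 1 this pushes four vector
   definitions into VEC_OPRNDS: two from the call itself and two more from
   the recursive call, matching the number of input vectors a two-step
   demotion consumes for each copy of the scalar statement.  */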
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence. Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
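/* Illustrative sketch (not generated code): widening V8HI operands to V4SI
   with CODE1/CODE2 = VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, each input vector
   yields two half-width results

     vy_lo = VEC_UNPACK_LO_EXPR <vx>;
     vy_hi = VEC_UNPACK_HI_EXPR <vx>;

   so the number of vectors in VEC_OPRNDS0 doubles at every step of a
   multi-step promotion.  */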
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
                                        vec<tree> *vec_oprnds1,
                                        gimple *stmt, tree vec_dest,
                                        gimple_stmt_iterator *gsi,
                                        enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
        vop1 = (*vec_oprnds1)[i];
      else
        vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
        {
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
        }
      else
        {
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);
        }

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
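/* Illustrative sketch (not generated code): for a widening conversion such as

     short s = ...;
     int   i = (int) s;

   the NONE/WIDEN/NARROW classification below picks WIDEN and the statement is
   vectorized with the promotion helpers above; a float <-> int conversion
   whose modes differ in size may additionally go through an intermediate
   integer type (CVT_TYPE).  */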
static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  tree vectype_out, vectype_in;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  unsigned short fltsz;
4082 /* Is STMT a vectorizable conversion? */
4084 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4087 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4091 if (!is_gimple_assign (stmt
))
4094 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4097 code
= gimple_assign_rhs_code (stmt
);
4098 if (!CONVERT_EXPR_CODE_P (code
)
4099 && code
!= FIX_TRUNC_EXPR
4100 && code
!= FLOAT_EXPR
4101 && code
!= WIDEN_MULT_EXPR
4102 && code
!= WIDEN_LSHIFT_EXPR
)
4105 op_type
= TREE_CODE_LENGTH (code
);
4107 /* Check types of lhs and rhs. */
4108 scalar_dest
= gimple_assign_lhs (stmt
);
4109 lhs_type
= TREE_TYPE (scalar_dest
);
4110 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4112 op0
= gimple_assign_rhs1 (stmt
);
4113 rhs_type
= TREE_TYPE (op0
);
4115 if ((code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4116 && !((INTEGRAL_TYPE_P (lhs_type
)
4117 && INTEGRAL_TYPE_P (rhs_type
))
4118 || (SCALAR_FLOAT_TYPE_P (lhs_type
)
4119 && SCALAR_FLOAT_TYPE_P (rhs_type
))))
4122 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4123 && ((INTEGRAL_TYPE_P (lhs_type
)
4124 && !type_has_mode_precision_p (lhs_type
))
4125 || (INTEGRAL_TYPE_P (rhs_type
)
4126 && !type_has_mode_precision_p (rhs_type
))))
4128 if (dump_enabled_p ())
4129 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4130 "type conversion to/from bit-precision unsupported."
4135 /* Check the operands of the operation. */
4136 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
4138 if (dump_enabled_p ())
4139 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4140 "use not simple.\n");
4143 if (op_type
== binary_op
)
4147 op1
= gimple_assign_rhs2 (stmt
);
4148 gcc_assert (code
== WIDEN_MULT_EXPR
|| code
== WIDEN_LSHIFT_EXPR
);
4149 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4151 if (CONSTANT_CLASS_P (op0
))
4152 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &vectype_in
);
4154 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]);
4158 if (dump_enabled_p ())
4159 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4160 "use not simple.\n");
4165 /* If op0 is an external or constant defs use a vector type of
4166 the same size as the output vector type. */
4168 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
4170 gcc_assert (vectype_in
);
4173 if (dump_enabled_p ())
4175 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4176 "no vectype for scalar type ");
4177 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4178 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4184 if (VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4185 && !VECTOR_BOOLEAN_TYPE_P (vectype_in
))
4187 if (dump_enabled_p ())
4189 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4190 "can't convert between boolean and non "
4192 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4193 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4199 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
4200 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4201 if (nunits_in
< nunits_out
)
4203 else if (nunits_out
== nunits_in
)
4208 /* Multiple types in SLP are handled by creating the appropriate number of
4209 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4213 else if (modifier
== NARROW
)
4214 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_out
);
4216 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
4218 /* Sanity check: make sure that at least one copy of the vectorized stmt
4219 needs to be generated. */
4220 gcc_assert (ncopies
>= 1);
4222 bool found_mode
= false;
4223 scalar_mode lhs_mode
= SCALAR_TYPE_MODE (lhs_type
);
4224 scalar_mode rhs_mode
= SCALAR_TYPE_MODE (rhs_type
);
4225 opt_scalar_mode rhs_mode_iter
;
4227 /* Supportable by target? */
4231 if (code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4233 if (supportable_convert_operation (code
, vectype_out
, vectype_in
,
4238 if (dump_enabled_p ())
4239 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4240 "conversion not supported by target.\n");
4244 if (supportable_widening_operation (code
, stmt
, vectype_out
, vectype_in
,
4245 &code1
, &code2
, &multi_step_cvt
,
4248 /* Binary widening operation can only be supported directly by the
4250 gcc_assert (!(multi_step_cvt
&& op_type
== binary_op
));
4254 if (code
!= FLOAT_EXPR
4255 || GET_MODE_SIZE (lhs_mode
) <= GET_MODE_SIZE (rhs_mode
))
4258 fltsz
= GET_MODE_SIZE (lhs_mode
);
4259 FOR_EACH_2XWIDER_MODE (rhs_mode_iter
, rhs_mode
)
4261 rhs_mode
= rhs_mode_iter
.require ();
4262 if (GET_MODE_SIZE (rhs_mode
) > fltsz
)
4266 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4267 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4268 if (cvt_type
== NULL_TREE
)
4271 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4273 if (!supportable_convert_operation (code
, vectype_out
,
4274 cvt_type
, &decl1
, &codecvt1
))
4277 else if (!supportable_widening_operation (code
, stmt
, vectype_out
,
4278 cvt_type
, &codecvt1
,
4279 &codecvt2
, &multi_step_cvt
,
4283 gcc_assert (multi_step_cvt
== 0);
4285 if (supportable_widening_operation (NOP_EXPR
, stmt
, cvt_type
,
4286 vectype_in
, &code1
, &code2
,
4287 &multi_step_cvt
, &interm_types
))
4297 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4298 codecvt2
= ERROR_MARK
;
4302 interm_types
.safe_push (cvt_type
);
4303 cvt_type
= NULL_TREE
;
4308 gcc_assert (op_type
== unary_op
);
4309 if (supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
4310 &code1
, &multi_step_cvt
,
4314 if (code
!= FIX_TRUNC_EXPR
4315 || GET_MODE_SIZE (lhs_mode
) >= GET_MODE_SIZE (rhs_mode
))
4319 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4320 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4321 if (cvt_type
== NULL_TREE
)
4323 if (!supportable_convert_operation (code
, cvt_type
, vectype_in
,
4326 if (supportable_narrowing_operation (NOP_EXPR
, vectype_out
, cvt_type
,
4327 &code1
, &multi_step_cvt
,
4336 if (!vec_stmt
) /* transformation not required. */
4338 if (dump_enabled_p ())
4339 dump_printf_loc (MSG_NOTE
, vect_location
,
4340 "=== vectorizable_conversion ===\n");
4341 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
4343 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
4344 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
4346 else if (modifier
== NARROW
)
4348 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
4349 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
4353 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
4354 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
4356 interm_types
.release ();
4361 if (dump_enabled_p ())
4362 dump_printf_loc (MSG_NOTE
, vect_location
,
4363 "transform conversion. ncopies = %d.\n", ncopies
);
4365 if (op_type
== binary_op
)
4367 if (CONSTANT_CLASS_P (op0
))
4368 op0
= fold_convert (TREE_TYPE (op1
), op0
);
4369 else if (CONSTANT_CLASS_P (op1
))
4370 op1
= fold_convert (TREE_TYPE (op0
), op1
);
4373 /* In case of multi-step conversion, we first generate conversion operations
4374 to the intermediate types, and then from that types to the final one.
4375 We create vector destinations for the intermediate type (TYPES) received
4376 from supportable_*_operation, and store them in the correct order
4377 for future use in vect_create_vectorized_*_stmts (). */
4378 auto_vec
<tree
> vec_dsts (multi_step_cvt
+ 1);
4379 vec_dest
= vect_create_destination_var (scalar_dest
,
4380 (cvt_type
&& modifier
== WIDEN
)
4381 ? cvt_type
: vectype_out
);
4382 vec_dsts
.quick_push (vec_dest
);
4386 for (i
= interm_types
.length () - 1;
4387 interm_types
.iterate (i
, &intermediate_type
); i
--)
4389 vec_dest
= vect_create_destination_var (scalar_dest
,
4391 vec_dsts
.quick_push (vec_dest
);
4396 vec_dest
= vect_create_destination_var (scalar_dest
,
4398 ? vectype_out
: cvt_type
);
4402 if (modifier
== WIDEN
)
4404 vec_oprnds0
.create (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1);
4405 if (op_type
== binary_op
)
4406 vec_oprnds1
.create (1);
4408 else if (modifier
== NARROW
)
4409 vec_oprnds0
.create (
4410 2 * (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
4412 else if (code
== WIDEN_LSHIFT_EXPR
)
4413 vec_oprnds1
.create (slp_node
->vec_stmts_size
);
4416 prev_stmt_info
= NULL
;
4420 for (j
= 0; j
< ncopies
; j
++)
4423 vect_get_vec_defs (op0
, NULL
, stmt
, &vec_oprnds0
, NULL
, slp_node
);
4425 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, NULL
);
4427 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4429 /* Arguments are ready, create the new vector stmt. */
4430 if (code1
== CALL_EXPR
)
4432 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4433 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4434 gimple_call_set_lhs (new_stmt
, new_temp
);
4438 gcc_assert (TREE_CODE_LENGTH (code1
) == unary_op
);
4439 new_stmt
= gimple_build_assign (vec_dest
, code1
, vop0
);
4440 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4441 gimple_assign_set_lhs (new_stmt
, new_temp
);
4444 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4446 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4449 if (!prev_stmt_info
)
4450 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4452 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4453 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4460 /* In case the vectorization factor (VF) is bigger than the number
4461 of elements that we can fit in a vectype (nunits), we have to
4462 generate more than one vector stmt - i.e - we need to "unroll"
4463 the vector stmt by a factor VF/nunits. */
4464 for (j
= 0; j
< ncopies
; j
++)
4471 if (code
== WIDEN_LSHIFT_EXPR
)
4476 /* Store vec_oprnd1 for every vector stmt to be created
4477 for SLP_NODE. We check during the analysis that all
4478 the shift arguments are the same. */
4479 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
4480 vec_oprnds1
.quick_push (vec_oprnd1
);
4482 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4486 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
,
4487 &vec_oprnds1
, slp_node
);
4491 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
);
4492 vec_oprnds0
.quick_push (vec_oprnd0
);
4493 if (op_type
== binary_op
)
4495 if (code
== WIDEN_LSHIFT_EXPR
)
4498 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
);
4499 vec_oprnds1
.quick_push (vec_oprnd1
);
4505 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
4506 vec_oprnds0
.truncate (0);
4507 vec_oprnds0
.quick_push (vec_oprnd0
);
4508 if (op_type
== binary_op
)
4510 if (code
== WIDEN_LSHIFT_EXPR
)
4513 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1],
4515 vec_oprnds1
.truncate (0);
4516 vec_oprnds1
.quick_push (vec_oprnd1
);
4520 /* Arguments are ready. Create the new vector stmts. */
4521 for (i
= multi_step_cvt
; i
>= 0; i
--)
4523 tree this_dest
= vec_dsts
[i
];
4524 enum tree_code c1
= code1
, c2
= code2
;
4525 if (i
== 0 && codecvt2
!= ERROR_MARK
)
4530 vect_create_vectorized_promotion_stmts (&vec_oprnds0
,
4532 stmt
, this_dest
, gsi
,
4533 c1
, c2
, decl1
, decl2
,
4537 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4541 if (codecvt1
== CALL_EXPR
)
4543 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4544 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4545 gimple_call_set_lhs (new_stmt
, new_temp
);
4549 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4550 new_temp
= make_ssa_name (vec_dest
);
4551 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4555 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4558 new_stmt
= SSA_NAME_DEF_STMT (vop0
);
4561 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4564 if (!prev_stmt_info
)
4565 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
4567 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4568 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4573 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4577 /* In case the vectorization factor (VF) is bigger than the number
4578 of elements that we can fit in a vectype (nunits), we have to
4579 generate more than one vector stmt - i.e - we need to "unroll"
4580 the vector stmt by a factor VF/nunits. */
4581 for (j
= 0; j
< ncopies
; j
++)
4585 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4589 vec_oprnds0
.truncate (0);
4590 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
4591 vect_pow2 (multi_step_cvt
) - 1);
4594 /* Arguments are ready. Create the new vector stmts. */
4596 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4598 if (codecvt1
== CALL_EXPR
)
4600 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4601 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4602 gimple_call_set_lhs (new_stmt
, new_temp
);
4606 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4607 new_temp
= make_ssa_name (vec_dest
);
4608 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4612 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4613 vec_oprnds0
[i
] = new_temp
;
4616 vect_create_vectorized_demotion_stmts (&vec_oprnds0
, multi_step_cvt
,
4617 stmt
, vec_dsts
, gsi
,
4622 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4626 vec_oprnds0
.release ();
4627 vec_oprnds1
.release ();
4628 interm_types
.release ();
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
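/* Illustrative sketch (not generated code): a copy, or a conversion that
   preserves the bit pattern, e.g.

     unsigned int u = (unsigned int) i;

   is vectorized as a plain vector copy, with a VIEW_CONVERT_EXPR wrapped
   around the operand when the source and destination vector types differ
   only in details such as element signedness.  */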
static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
4648 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4649 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4652 enum vect_def_type dt
[1] = {vect_unknown_def_type
};
4656 vec
<tree
> vec_oprnds
= vNULL
;
4658 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4659 vec_info
*vinfo
= stmt_info
->vinfo
;
4660 gimple
*new_stmt
= NULL
;
4661 stmt_vec_info prev_stmt_info
= NULL
;
4662 enum tree_code code
;
4665 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4668 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4672 /* Is vectorizable assignment? */
4673 if (!is_gimple_assign (stmt
))
4676 scalar_dest
= gimple_assign_lhs (stmt
);
4677 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
4680 code
= gimple_assign_rhs_code (stmt
);
4681 if (gimple_assign_single_p (stmt
)
4682 || code
== PAREN_EXPR
4683 || CONVERT_EXPR_CODE_P (code
))
4684 op
= gimple_assign_rhs1 (stmt
);
4688 if (code
== VIEW_CONVERT_EXPR
)
4689 op
= TREE_OPERAND (op
, 0);
4691 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
4692 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
4694 /* Multiple types in SLP are handled by creating the appropriate number of
4695 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4700 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
4702 gcc_assert (ncopies
>= 1);
4704 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
4706 if (dump_enabled_p ())
4707 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4708 "use not simple.\n");
4712 /* We can handle NOP_EXPR conversions that do not change the number
4713 of elements or the vector size. */
4714 if ((CONVERT_EXPR_CODE_P (code
)
4715 || code
== VIEW_CONVERT_EXPR
)
4717 || TYPE_VECTOR_SUBPARTS (vectype_in
) != nunits
4718 || (GET_MODE_SIZE (TYPE_MODE (vectype
))
4719 != GET_MODE_SIZE (TYPE_MODE (vectype_in
)))))
4722 /* We do not handle bit-precision changes. */
4723 if ((CONVERT_EXPR_CODE_P (code
)
4724 || code
== VIEW_CONVERT_EXPR
)
4725 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
4726 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
4727 || !type_has_mode_precision_p (TREE_TYPE (op
)))
4728 /* But a conversion that does not change the bit-pattern is ok. */
4729 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
4730 > TYPE_PRECISION (TREE_TYPE (op
)))
4731 && TYPE_UNSIGNED (TREE_TYPE (op
)))
4732 /* Conversion between boolean types of different sizes is
4733 a simple assignment in case their vectypes are same
4735 && (!VECTOR_BOOLEAN_TYPE_P (vectype
)
4736 || !VECTOR_BOOLEAN_TYPE_P (vectype_in
)))
4738 if (dump_enabled_p ())
4739 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4740 "type conversion to/from bit-precision "
4745 if (!vec_stmt
) /* transformation not required. */
4747 STMT_VINFO_TYPE (stmt_info
) = assignment_vec_info_type
;
4748 if (dump_enabled_p ())
4749 dump_printf_loc (MSG_NOTE
, vect_location
,
4750 "=== vectorizable_assignment ===\n");
4751 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
4756 if (dump_enabled_p ())
4757 dump_printf_loc (MSG_NOTE
, vect_location
, "transform assignment.\n");
4760 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
4763 for (j
= 0; j
< ncopies
; j
++)
4767 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
);
4769 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
4771 /* Arguments are ready. create the new vector stmt. */
4772 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
4774 if (CONVERT_EXPR_CODE_P (code
)
4775 || code
== VIEW_CONVERT_EXPR
)
4776 vop
= build1 (VIEW_CONVERT_EXPR
, vectype
, vop
);
4777 new_stmt
= gimple_build_assign (vec_dest
, vop
);
4778 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4779 gimple_assign_set_lhs (new_stmt
, new_temp
);
4780 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4782 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4789 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4791 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4793 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4796 vec_oprnds
.release ();
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */
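/* Illustrative use (hypothetical caller, not actual code): before rewriting
   a division by a power of two into a shift, a pattern recognizer might check

     if (!vect_supportable_shift (RSHIFT_EXPR, itype))
       return NULL;

   so the replacement is only made when the target can vectorize the
   resulting shift at all.  */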
bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
          || (optab_handler (optab, TYPE_MODE (vectype))
              == CODE_FOR_nothing))
        return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
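/* Illustrative sketch (not generated code): the shift amount decides which
   optab is used, e.g.

     x[i] = y[i] << 3;      <-- scalar shift amount, vector/scalar optab
     x[i] = y[i] << z[i];   <-- vector shift amount, vector/vector optab

   and a constant amount may still be broadcast into a vector when only the
   vector/vector form is supported by the target.  */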
static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
4850 tree op0
, op1
= NULL
;
4851 tree vec_oprnd1
= NULL_TREE
;
4852 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4854 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4855 enum tree_code code
;
4856 machine_mode vec_mode
;
4860 machine_mode optab_op2_mode
;
4862 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4864 gimple
*new_stmt
= NULL
;
4865 stmt_vec_info prev_stmt_info
;
4872 vec
<tree
> vec_oprnds0
= vNULL
;
4873 vec
<tree
> vec_oprnds1
= vNULL
;
4876 bool scalar_shift_arg
= true;
4877 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4878 vec_info
*vinfo
= stmt_info
->vinfo
;
4880 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4883 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4887 /* Is STMT a vectorizable binary/unary operation? */
4888 if (!is_gimple_assign (stmt
))
4891 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4894 code
= gimple_assign_rhs_code (stmt
);
4896 if (!(code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
4897 || code
== RROTATE_EXPR
))
4900 scalar_dest
= gimple_assign_lhs (stmt
);
4901 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4902 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
)))
4904 if (dump_enabled_p ())
4905 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4906 "bit-precision shifts not supported.\n");
4910 op0
= gimple_assign_rhs1 (stmt
);
4911 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
4913 if (dump_enabled_p ())
4914 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4915 "use not simple.\n");
4918 /* If op0 is an external or constant def use a vector type with
4919 the same size as the output vector type. */
4921 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
4923 gcc_assert (vectype
);
4926 if (dump_enabled_p ())
4927 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4928 "no vectype for scalar type\n");
4932 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4933 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
4934 if (nunits_out
!= nunits_in
)
4937 op1
= gimple_assign_rhs2 (stmt
);
4938 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &op1_vectype
))
4940 if (dump_enabled_p ())
4941 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4942 "use not simple.\n");
4946 /* Multiple types in SLP are handled by creating the appropriate number of
4947 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4952 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
4954 gcc_assert (ncopies
>= 1);
4956 /* Determine whether the shift amount is a vector, or scalar. If the
4957 shift/rotate amount is a vector, use the vector/vector shift optabs. */
4959 if ((dt
[1] == vect_internal_def
4960 || dt
[1] == vect_induction_def
)
4962 scalar_shift_arg
= false;
4963 else if (dt
[1] == vect_constant_def
4964 || dt
[1] == vect_external_def
4965 || dt
[1] == vect_internal_def
)
4967 /* In SLP, need to check whether the shift count is the same,
4968 in loops if it is a constant or invariant, it is always
4972 vec
<gimple
*> stmts
= SLP_TREE_SCALAR_STMTS (slp_node
);
4975 FOR_EACH_VEC_ELT (stmts
, k
, slpstmt
)
4976 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt
), op1
, 0))
4977 scalar_shift_arg
= false;
4980 /* If the shift amount is computed by a pattern stmt we cannot
4981 use the scalar amount directly thus give up and use a vector
4983 if (dt
[1] == vect_internal_def
)
4985 gimple
*def
= SSA_NAME_DEF_STMT (op1
);
4986 if (is_pattern_stmt_p (vinfo_for_stmt (def
)))
4987 scalar_shift_arg
= false;
4992 if (dump_enabled_p ())
4993 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4994 "operand mode requires invariant argument.\n");
4998 /* Vector shifted by vector. */
4999 if (!scalar_shift_arg
)
5001 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5002 if (dump_enabled_p ())
5003 dump_printf_loc (MSG_NOTE
, vect_location
,
5004 "vector/vector shift/rotate found.\n");
5007 op1_vectype
= get_same_sized_vectype (TREE_TYPE (op1
), vectype_out
);
5008 if (op1_vectype
== NULL_TREE
5009 || TYPE_MODE (op1_vectype
) != TYPE_MODE (vectype
))
5011 if (dump_enabled_p ())
5012 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5013 "unusable type for last operand in"
5014 " vector/vector shift/rotate.\n");
5018 /* See if the machine has a vector shifted by scalar insn and if not
5019 then see if it has a vector shifted by vector insn. */
5022 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
5024 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
5026 if (dump_enabled_p ())
5027 dump_printf_loc (MSG_NOTE
, vect_location
,
5028 "vector/scalar shift/rotate found.\n");
5032 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5034 && (optab_handler (optab
, TYPE_MODE (vectype
))
5035 != CODE_FOR_nothing
))
5037 scalar_shift_arg
= false;
5039 if (dump_enabled_p ())
5040 dump_printf_loc (MSG_NOTE
, vect_location
,
5041 "vector/vector shift/rotate found.\n");
5043 /* Unlike the other binary operators, shifts/rotates have
5044 the rhs being int, instead of the same type as the lhs,
5045 so make sure the scalar is the right type if we are
5046 dealing with vectors of long long/long/short/char. */
5047 if (dt
[1] == vect_constant_def
)
5048 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5049 else if (!useless_type_conversion_p (TREE_TYPE (vectype
),
5053 && TYPE_MODE (TREE_TYPE (vectype
))
5054 != TYPE_MODE (TREE_TYPE (op1
)))
5056 if (dump_enabled_p ())
5057 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5058 "unusable type for last operand in"
5059 " vector/vector shift/rotate.\n");
5062 if (vec_stmt
&& !slp_node
)
5064 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5065 op1
= vect_init_vector (stmt
, op1
,
5066 TREE_TYPE (vectype
), NULL
);
5073 /* Supportable by target? */
5076 if (dump_enabled_p ())
5077 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5081 vec_mode
= TYPE_MODE (vectype
);
5082 icode
= (int) optab_handler (optab
, vec_mode
);
5083 if (icode
== CODE_FOR_nothing
)
5085 if (dump_enabled_p ())
5086 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5087 "op not supported by target.\n");
5088 /* Check only during analysis. */
5089 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
5091 && !vect_worthwhile_without_simd_p (vinfo
, code
)))
5093 if (dump_enabled_p ())
5094 dump_printf_loc (MSG_NOTE
, vect_location
,
5095 "proceeding using word mode.\n");
5098 /* Worthwhile without SIMD support? Check only during analysis. */
5100 && !VECTOR_MODE_P (TYPE_MODE (vectype
))
5101 && !vect_worthwhile_without_simd_p (vinfo
, code
))
5103 if (dump_enabled_p ())
5104 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5105 "not worthwhile without SIMD support.\n");
5109 if (!vec_stmt
) /* transformation not required. */
5111 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
5112 if (dump_enabled_p ())
5113 dump_printf_loc (MSG_NOTE
, vect_location
,
5114 "=== vectorizable_shift ===\n");
5115 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
5121 if (dump_enabled_p ())
5122 dump_printf_loc (MSG_NOTE
, vect_location
,
5123 "transform binary/unary operation.\n");
5126 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5128 prev_stmt_info
= NULL
;
5129 for (j
= 0; j
< ncopies
; j
++)
5134 if (scalar_shift_arg
)
5136 /* Vector shl and shr insn patterns can be defined with scalar
5137 operand 2 (shift operand). In this case, use constant or loop
5138 invariant op1 directly, without extending it to vector mode
5140 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
5141 if (!VECTOR_MODE_P (optab_op2_mode
))
5143 if (dump_enabled_p ())
5144 dump_printf_loc (MSG_NOTE
, vect_location
,
5145 "operand 1 using scalar mode.\n");
5147 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
5148 vec_oprnds1
.quick_push (vec_oprnd1
);
5151 /* Store vec_oprnd1 for every vector stmt to be created
5152 for SLP_NODE. We check during the analysis that all
5153 the shift arguments are the same.
5154 TODO: Allow different constants for different vector
5155 stmts generated for an SLP instance. */
5156 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
5157 vec_oprnds1
.quick_push (vec_oprnd1
);
5162 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5163 (a special case for certain kind of vector shifts); otherwise,
5164 operand 1 should be of a vector type (the usual case). */
5166 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
5169 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5173 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
5175 /* Arguments are ready. Create the new vector stmt. */
5176 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5178 vop1
= vec_oprnds1
[i
];
5179 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
5180 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5181 gimple_assign_set_lhs (new_stmt
, new_temp
);
5182 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5184 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5191 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5193 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5194 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5197 vec_oprnds0
.release ();
5198 vec_oprnds1
.release ();
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, slp_tree slp_node)
{
5218 tree op0
, op1
= NULL_TREE
, op2
= NULL_TREE
;
5219 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5221 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5222 enum tree_code code
;
5223 machine_mode vec_mode
;
5227 bool target_support_p
;
5229 enum vect_def_type dt
[3]
5230 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
5232 gimple
*new_stmt
= NULL
;
5233 stmt_vec_info prev_stmt_info
;
5239 vec
<tree
> vec_oprnds0
= vNULL
;
5240 vec
<tree
> vec_oprnds1
= vNULL
;
5241 vec
<tree
> vec_oprnds2
= vNULL
;
5242 tree vop0
, vop1
, vop2
;
5243 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5244 vec_info
*vinfo
= stmt_info
->vinfo
;
5246 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5249 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5253 /* Is STMT a vectorizable binary/unary operation? */
5254 if (!is_gimple_assign (stmt
))
5257 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
5260 code
= gimple_assign_rhs_code (stmt
);
5262 /* For pointer addition, we should use the normal plus for
5263 the vector addition. */
5264 if (code
== POINTER_PLUS_EXPR
)
5267 /* Support only unary or binary operations. */
5268 op_type
= TREE_CODE_LENGTH (code
);
5269 if (op_type
!= unary_op
&& op_type
!= binary_op
&& op_type
!= ternary_op
)
5271 if (dump_enabled_p ())
5272 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5273 "num. args = %d (not unary/binary/ternary op).\n",
5278 scalar_dest
= gimple_assign_lhs (stmt
);
5279 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
5281 /* Most operations cannot handle bit-precision types without extra
5283 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
5284 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
5285 /* Exception are bitwise binary operations. */
5286 && code
!= BIT_IOR_EXPR
5287 && code
!= BIT_XOR_EXPR
5288 && code
!= BIT_AND_EXPR
)
5290 if (dump_enabled_p ())
5291 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5292 "bit-precision arithmetic not supported.\n");
5296 op0
= gimple_assign_rhs1 (stmt
);
5297 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
5299 if (dump_enabled_p ())
5300 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5301 "use not simple.\n");
5304 /* If op0 is an external or constant def use a vector type with
5305 the same size as the output vector type. */
5308 /* For boolean type we cannot determine vectype by
5309 invariant value (don't know whether it is a vector
5310 of booleans or vector of integers). We use output
5311 vectype because operations on boolean don't change
5313 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0
)))
5315 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest
)))
5317 if (dump_enabled_p ())
5318 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5319 "not supported operation on bool value.\n");
5322 vectype
= vectype_out
;
5325 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
5328 gcc_assert (vectype
);
5331 if (dump_enabled_p ())
5333 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5334 "no vectype for scalar type ");
5335 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
5337 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
5343 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
5344 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
5345 if (nunits_out
!= nunits_in
)
5348 if (op_type
== binary_op
|| op_type
== ternary_op
)
5350 op1
= gimple_assign_rhs2 (stmt
);
5351 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]))
5353 if (dump_enabled_p ())
5354 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5355 "use not simple.\n");
5359 if (op_type
== ternary_op
)
5361 op2
= gimple_assign_rhs3 (stmt
);
5362 if (!vect_is_simple_use (op2
, vinfo
, &def_stmt
, &dt
[2]))
5364 if (dump_enabled_p ())
5365 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5366 "use not simple.\n");
5371 /* Multiple types in SLP are handled by creating the appropriate number of
5372 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5377 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5379 gcc_assert (ncopies
>= 1);
5381 /* Shifts are handled in vectorizable_shift (). */
5382 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
5383 || code
== RROTATE_EXPR
)
5386 /* Supportable by target? */
5388 vec_mode
= TYPE_MODE (vectype
);
5389 if (code
== MULT_HIGHPART_EXPR
)
5390 target_support_p
= can_mult_highpart_p (vec_mode
, TYPE_UNSIGNED (vectype
));
5393 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
5396 if (dump_enabled_p ())
5397 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5401 target_support_p
= (optab_handler (optab
, vec_mode
)
5402 != CODE_FOR_nothing
);
5405 if (!target_support_p
)
5407 if (dump_enabled_p ())
5408 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5409 "op not supported by target.\n");
5410 /* Check only during analysis. */
5411 if (GET_MODE_SIZE (vec_mode
) != UNITS_PER_WORD
5412 || (!vec_stmt
&& !vect_worthwhile_without_simd_p (vinfo
, code
)))
5414 if (dump_enabled_p ())
5415 dump_printf_loc (MSG_NOTE
, vect_location
,
5416 "proceeding using word mode.\n");
5419 /* Worthwhile without SIMD support? Check only during analysis. */
5420 if (!VECTOR_MODE_P (vec_mode
)
5422 && !vect_worthwhile_without_simd_p (vinfo
, code
))
5424 if (dump_enabled_p ())
5425 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5426 "not worthwhile without SIMD support.\n");
5430 if (!vec_stmt
) /* transformation not required. */
5432 STMT_VINFO_TYPE (stmt_info
) = op_vec_info_type
;
5433 if (dump_enabled_p ())
5434 dump_printf_loc (MSG_NOTE
, vect_location
,
5435 "=== vectorizable_operation ===\n");
5436 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
5442 if (dump_enabled_p ())
5443 dump_printf_loc (MSG_NOTE
, vect_location
,
5444 "transform binary/unary operation.\n");
5447 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
5502 prev_stmt_info
= NULL
;
5503 for (j
= 0; j
< ncopies
; j
++)
5508 if (op_type
== binary_op
|| op_type
== ternary_op
)
5509 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5512 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
5514 if (op_type
== ternary_op
)
5515 vect_get_vec_defs (op2
, NULL_TREE
, stmt
, &vec_oprnds2
, NULL
,
5520 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
5521 if (op_type
== ternary_op
)
5523 tree vec_oprnd
= vec_oprnds2
.pop ();
5524 vec_oprnds2
.quick_push (vect_get_vec_def_for_stmt_copy (dt
[2],
5529 /* Arguments are ready. Create the new vector stmt. */
5530 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5532 vop1
= ((op_type
== binary_op
|| op_type
== ternary_op
)
5533 ? vec_oprnds1
[i
] : NULL_TREE
);
5534 vop2
= ((op_type
== ternary_op
)
5535 ? vec_oprnds2
[i
] : NULL_TREE
);
5536 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
, vop2
);
5537 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5538 gimple_assign_set_lhs (new_stmt
, new_temp
);
5539 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5541 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5548 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5550 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5551 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5554 vec_oprnds0
.release ();
5555 vec_oprnds1
.release ();
5556 vec_oprnds2
.release ();
/* A helper function to ensure data reference DR's base alignment.  */
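/* Illustrative sketch (not generated code): if the alignment analysis
   recorded that a global such as

     static float a[1024];

   is not sufficiently aligned for the chosen vector mode, the helper below
   bumps DECL_ALIGN of 'a' up to DR_TARGET_ALIGNMENT so the vectorized
   accesses can use aligned loads and stores.  */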
static void
ensure_base_align (struct data_reference *dr)
{
  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;

      if (decl_in_symtab_p (base_decl))
        symtab_node::get (base_decl)->increase_alignment (align_base_to);
      else
        {
          SET_DECL_ALIGN (base_decl, align_base_to);
          DECL_USER_ALIGN (base_decl) = 1;
        }
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
/* Function get_group_alias_ptr_type.

   Return the alias type for the group starting at FIRST_STMT.  */
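/* Illustrative sketch (not generated code): for an interleaved store group
   such as

     p->x = a[i];
     p->y = b[i];

   the members usually share an alias set and the reference alias type of the
   first member is returned; if the members disagree, the function falls back
   to ptr_type_node, i.e. "may alias anything".  */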
static tree
get_group_alias_ptr_type (gimple *first_stmt)
{
  struct data_reference *first_dr, *next_dr;
  gimple *next_stmt;

  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
  while (next_stmt)
    {
      next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
      if (get_alias_set (DR_REF (first_dr))
          != get_alias_set (DR_REF (next_dr)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "conflicting alias set types.\n");
          return ptr_type_node;
        }
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  return reference_alias_ptr_type (DR_REF (first_dr));
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
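/* Illustrative sketch (not generated code): for a simple contiguous store

     for (i = 0; i < n; i++)
       a[i] = x[i] + 1;

   each copy of the vectorized statement writes one full vector to
   '&a + vec_size * copy', while grouped, strided and scatter stores take the
   more involved paths below.  */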
static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
5631 tree vec_oprnd
= NULL_TREE
;
5632 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5633 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
5635 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5636 struct loop
*loop
= NULL
;
5637 machine_mode vec_mode
;
5639 enum dr_alignment_support alignment_support_scheme
;
5641 enum vect_def_type dt
;
5642 stmt_vec_info prev_stmt_info
= NULL
;
5643 tree dataref_ptr
= NULL_TREE
;
5644 tree dataref_offset
= NULL_TREE
;
5645 gimple
*ptr_incr
= NULL
;
5648 gimple
*next_stmt
, *first_stmt
;
5650 unsigned int group_size
, i
;
5651 vec
<tree
> oprnds
= vNULL
;
5652 vec
<tree
> result_chain
= vNULL
;
5654 tree offset
= NULL_TREE
;
5655 vec
<tree
> vec_oprnds
= vNULL
;
5656 bool slp
= (slp_node
!= NULL
);
5657 unsigned int vec_num
;
5658 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5659 vec_info
*vinfo
= stmt_info
->vinfo
;
5661 gather_scatter_info gs_info
;
5662 enum vect_def_type scatter_src_dt
= vect_unknown_def_type
;
5665 vec_load_store_type vls_type
;
5668 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5671 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5675 /* Is vectorizable store? */
5677 if (!is_gimple_assign (stmt
))
5680 scalar_dest
= gimple_assign_lhs (stmt
);
5681 if (TREE_CODE (scalar_dest
) == VIEW_CONVERT_EXPR
5682 && is_pattern_stmt_p (stmt_info
))
5683 scalar_dest
= TREE_OPERAND (scalar_dest
, 0);
5684 if (TREE_CODE (scalar_dest
) != ARRAY_REF
5685 && TREE_CODE (scalar_dest
) != BIT_FIELD_REF
5686 && TREE_CODE (scalar_dest
) != INDIRECT_REF
5687 && TREE_CODE (scalar_dest
) != COMPONENT_REF
5688 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
5689 && TREE_CODE (scalar_dest
) != REALPART_EXPR
5690 && TREE_CODE (scalar_dest
) != MEM_REF
)
5693 /* Cannot have hybrid store SLP -- that would mean storing to the
5694 same location twice. */
5695 gcc_assert (slp
== PURE_SLP_STMT (stmt_info
));
5697 gcc_assert (gimple_assign_single_p (stmt
));
5699 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
), rhs_vectype
= NULL_TREE
;
5700 unsigned int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
5704 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
5705 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
5710 /* Multiple types in SLP are handled by creating the appropriate number of
5711 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5716 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5718 gcc_assert (ncopies
>= 1);
5720 /* FORNOW. This restriction should be relaxed. */
5721 if (loop
&& nested_in_vect_loop_p (loop
, stmt
) && ncopies
> 1)
5723 if (dump_enabled_p ())
5724 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5725 "multiple types in nested loop.\n");
5729 op
= gimple_assign_rhs1 (stmt
);
5731 /* In the case this is a store from a STRING_CST make sure
5732 native_encode_expr can handle it. */
5733 if (TREE_CODE (op
) == STRING_CST
5734 && ! can_native_encode_string_p (op
))
5737 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
, &rhs_vectype
))
5739 if (dump_enabled_p ())
5740 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5741 "use not simple.\n");
5745 if (dt
== vect_constant_def
|| dt
== vect_external_def
)
5746 vls_type
= VLS_STORE_INVARIANT
;
5748 vls_type
= VLS_STORE
;
5750 if (rhs_vectype
&& !useless_type_conversion_p (vectype
, rhs_vectype
))
5753 elem_type
= TREE_TYPE (vectype
);
5754 vec_mode
= TYPE_MODE (vectype
);
5756 /* FORNOW. In some cases can vectorize even if data-type not supported
5757 (e.g. - array initialization with 0). */
5758 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
5761 if (!STMT_VINFO_DATA_REF (stmt_info
))
5764 vect_memory_access_type memory_access_type
;
5765 if (!get_load_store_type (stmt
, vectype
, slp
, vls_type
, ncopies
,
5766 &memory_access_type
, &gs_info
))
5769 if (!vec_stmt
) /* transformation not required. */
5771 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
5772 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
5773 /* The SLP costs are calculated during SLP analysis. */
5774 if (!PURE_SLP_STMT (stmt_info
))
5775 vect_model_store_cost (stmt_info
, ncopies
, memory_access_type
, dt
,
5779 gcc_assert (memory_access_type
== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
5783 ensure_base_align (dr
);
5785 if (memory_access_type
== VMAT_GATHER_SCATTER
)
5787 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
, op
, src
;
5788 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
5789 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
5790 tree ptr
, mask
, var
, scale
, perm_mask
= NULL_TREE
;
5791 edge pe
= loop_preheader_edge (loop
);
5794 enum { NARROW
, NONE
, WIDEN
} modifier
;
5795 int scatter_off_nunits
= TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
5797 if (nunits
== (unsigned int) scatter_off_nunits
)
5799 else if (nunits
== (unsigned int) scatter_off_nunits
/ 2)
5803 auto_vec_perm_indices
sel (scatter_off_nunits
);
5804 for (i
= 0; i
< (unsigned int) scatter_off_nunits
; ++i
)
5805 sel
.quick_push (i
| nunits
);
5807 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
, sel
);
5808 gcc_assert (perm_mask
!= NULL_TREE
);
5810 else if (nunits
== (unsigned int) scatter_off_nunits
* 2)
5814 auto_vec_perm_indices
sel (nunits
);
5815 for (i
= 0; i
< (unsigned int) nunits
; ++i
)
5816 sel
.quick_push (i
| scatter_off_nunits
);
5818 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
5819 gcc_assert (perm_mask
!= NULL_TREE
);
5825 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
5826 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5827 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5828 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5829 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
5830 scaletype
= TREE_VALUE (arglist
);
5832 gcc_checking_assert (TREE_CODE (masktype
) == INTEGER_TYPE
5833 && TREE_CODE (rettype
) == VOID_TYPE
);
5835 ptr
= fold_convert (ptrtype
, gs_info
.base
);
5836 if (!is_gimple_min_invariant (ptr
))
5838 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
5839 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
5840 gcc_assert (!new_bb
);
5843 /* Currently we support only unconditional scatter stores,
5844 so mask should be all ones. */
5845 mask
= build_int_cst (masktype
, -1);
5846 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
5848 scale
= build_int_cst (scaletype
, gs_info
.scale
);
5850 prev_stmt_info
= NULL
;
5851 for (j
= 0; j
< ncopies
; ++j
)
5856 = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt
), stmt
);
5858 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
5860 else if (modifier
!= NONE
&& (j
& 1))
5862 if (modifier
== WIDEN
)
5865 = vect_get_vec_def_for_stmt_copy (scatter_src_dt
, vec_oprnd1
);
5866 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
, perm_mask
,
5869 else if (modifier
== NARROW
)
5871 src
= permute_vec_elements (vec_oprnd1
, vec_oprnd1
, perm_mask
,
5874 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
5883 = vect_get_vec_def_for_stmt_copy (scatter_src_dt
, vec_oprnd1
);
5885 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
5889 if (!useless_type_conversion_p (srctype
, TREE_TYPE (src
)))
5891 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src
))
5892 == TYPE_VECTOR_SUBPARTS (srctype
));
5893 var
= vect_get_new_ssa_name (srctype
, vect_simple_var
);
5894 src
= build1 (VIEW_CONVERT_EXPR
, srctype
, src
);
5895 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, src
);
5896 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5900 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
5902 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
5903 == TYPE_VECTOR_SUBPARTS (idxtype
));
5904 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
5905 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
5906 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
5907 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5912 = gimple_build_call (gs_info
.decl
, 5, ptr
, mask
, op
, src
, scale
);
5914 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5916 if (prev_stmt_info
== NULL
)
5917 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5919 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5920 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5925 grouped_store
= STMT_VINFO_GROUPED_ACCESS (stmt_info
);
5928 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
5929 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5930 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
5932 GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))++;
5935 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
5937 /* We vectorize all the stmts of the interleaving group when we
5938 reach the last stmt in the group. */
5939 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
5940 < GROUP_SIZE (vinfo_for_stmt (first_stmt
))
5949 grouped_store
= false;
5950 /* VEC_NUM is the number of vect stmts to be created for this
5952 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
5953 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
5954 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt
)) == first_stmt
);
5955 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
5956 op
= gimple_assign_rhs1 (first_stmt
);
5959 /* VEC_NUM is the number of vect stmts to be created for this
5961 vec_num
= group_size
;
5963 ref_type
= get_group_alias_ptr_type (first_stmt
);
5969 group_size
= vec_num
= 1;
5970 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
5973 if (dump_enabled_p ())
5974 dump_printf_loc (MSG_NOTE
, vect_location
,
5975 "transform store. ncopies = %d\n", ncopies
);
5977 if (memory_access_type
== VMAT_ELEMENTWISE
5978 || memory_access_type
== VMAT_STRIDED_SLP
)
5980 gimple_stmt_iterator incr_gsi
;
5986 gimple_seq stmts
= NULL
;
5987 tree stride_base
, stride_step
, alias_off
;
5991 gcc_assert (!nested_in_vect_loop_p (loop
, stmt
));
5994 = fold_build_pointer_plus
5995 (unshare_expr (DR_BASE_ADDRESS (first_dr
)),
5996 size_binop (PLUS_EXPR
,
5997 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr
))),
5998 convert_to_ptrofftype (DR_INIT (first_dr
))));
5999 stride_step
= fold_convert (sizetype
, unshare_expr (DR_STEP (first_dr
)));
      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             array[i] = ...;

         we generate a new induction variable and new stores from
         the components of the (vectorized) rhs:

           for (j = 0; ; j += VF*stride)
             tmp1 = ...;
             array[j] = tmp1;
             tmp2 = ...;
             array[j + stride] = tmp2;
             ...
       */
6019 unsigned nstores
= nunits
;
6021 tree ltype
= elem_type
;
6022 tree lvectype
= vectype
;
6025 if (group_size
< nunits
6026 && nunits
% group_size
== 0)
6028 nstores
= nunits
/ group_size
;
6030 ltype
= build_vector_type (elem_type
, group_size
);
6033 /* First check if vec_extract optab doesn't support extraction
6034 of vector elts directly. */
6035 scalar_mode elmode
= SCALAR_TYPE_MODE (elem_type
);
6037 if (!mode_for_vector (elmode
, group_size
).exists (&vmode
)
6038 || !VECTOR_MODE_P (vmode
)
6039 || (convert_optab_handler (vec_extract_optab
,
6040 TYPE_MODE (vectype
), vmode
)
6041 == CODE_FOR_nothing
))
6043 /* Try to avoid emitting an extract of vector elements
6044 by performing the extracts using an integer type of the
6045 same size, extracting from a vector of those and then
6046 re-interpreting it as the original vector type if
6049 = group_size
* GET_MODE_BITSIZE (elmode
);
6050 elmode
= int_mode_for_size (lsize
, 0).require ();
6051 /* If we can't construct such a vector fall back to
6052 element extracts from the original vector type and
6053 element size stores. */
6054 if (mode_for_vector (elmode
,
6055 nunits
/ group_size
).exists (&vmode
)
6056 && VECTOR_MODE_P (vmode
)
6057 && (convert_optab_handler (vec_extract_optab
,
6059 != CODE_FOR_nothing
))
6061 nstores
= nunits
/ group_size
;
6063 ltype
= build_nonstandard_integer_type (lsize
, 1);
6064 lvectype
= build_vector_type (ltype
, nstores
);
6066 /* Else fall back to vector extraction anyway.
6067 Fewer stores are more important than avoiding spilling
6068 of the vector we extract from. Compared to the
6069 construction case in vectorizable_load no store-forwarding
6070 issue exists here for reasonable archs. */
6073 else if (group_size
>= nunits
6074 && group_size
% nunits
== 0)
6081 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (elem_type
));
6082 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6085 ivstep
= stride_step
;
6086 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
6087 build_int_cst (TREE_TYPE (ivstep
), vf
));
6089 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6091 create_iv (stride_base
, ivstep
, NULL
,
6092 loop
, &incr_gsi
, insert_after
,
6094 incr
= gsi_stmt (incr_gsi
);
6095 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
6097 stride_step
= force_gimple_operand (stride_step
, &stmts
, true, NULL_TREE
);
6099 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
6101 prev_stmt_info
= NULL
;
6102 alias_off
= build_int_cst (ref_type
, 0);
6103 next_stmt
= first_stmt
;
6104 for (g
= 0; g
< group_size
; g
++)
6106 running_off
= offvar
;
6109 tree size
= TYPE_SIZE_UNIT (ltype
);
6110 tree pos
= fold_build2 (MULT_EXPR
, sizetype
, size_int (g
),
6112 tree newoff
= copy_ssa_name (running_off
, NULL
);
6113 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6115 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6116 running_off
= newoff
;
6118 unsigned int group_el
= 0;
6119 unsigned HOST_WIDE_INT
6120 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
6121 for (j
= 0; j
< ncopies
; j
++)
              /* We've set op and dt above, from gimple_assign_rhs1(stmt),
                 and first_stmt == stmt.  */
6129 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
, NULL
,
6131 vec_oprnd
= vec_oprnds
[0];
6135 gcc_assert (gimple_assign_single_p (next_stmt
));
6136 op
= gimple_assign_rhs1 (next_stmt
);
6137 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6143 vec_oprnd
= vec_oprnds
[j
];
6146 vect_is_simple_use (vec_oprnd
, vinfo
, &def_stmt
, &dt
);
6147 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
6150 /* Pun the vector to extract from if necessary. */
6151 if (lvectype
!= vectype
)
6153 tree tem
= make_ssa_name (lvectype
);
6155 = gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
6156 lvectype
, vec_oprnd
));
6157 vect_finish_stmt_generation (stmt
, pun
, gsi
);
6160 for (i
= 0; i
< nstores
; i
++)
6162 tree newref
, newoff
;
6163 gimple
*incr
, *assign
;
6164 tree size
= TYPE_SIZE (ltype
);
6165 /* Extract the i'th component. */
6166 tree pos
= fold_build2 (MULT_EXPR
, bitsizetype
,
6167 bitsize_int (i
), size
);
6168 tree elem
= fold_build3 (BIT_FIELD_REF
, ltype
, vec_oprnd
,
6171 elem
= force_gimple_operand_gsi (gsi
, elem
, true,
6175 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
6177 newref
= build2 (MEM_REF
, ltype
,
6178 running_off
, this_off
);
6180 /* And store it to *running_off. */
6181 assign
= gimple_build_assign (newref
, elem
);
6182 vect_finish_stmt_generation (stmt
, assign
, gsi
);
6186 || group_el
== group_size
)
6188 newoff
= copy_ssa_name (running_off
, NULL
);
6189 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6190 running_off
, stride_step
);
6191 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6193 running_off
= newoff
;
6196 if (g
== group_size
- 1
6199 if (j
== 0 && i
== 0)
6200 STMT_VINFO_VEC_STMT (stmt_info
)
6201 = *vec_stmt
= assign
;
6203 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = assign
;
6204 prev_stmt_info
= vinfo_for_stmt (assign
);
6208 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6213 vec_oprnds
.release ();
6217 auto_vec
<tree
> dr_chain (group_size
);
6218 oprnds
.create (group_size
);
6220 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
6221 gcc_assert (alignment_support_scheme
);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
6224 gcc_assert (memory_access_type
!= VMAT_LOAD_STORE_LANES
6225 || alignment_support_scheme
== dr_aligned
6226 || alignment_support_scheme
== dr_unaligned_supported
);
6228 if (memory_access_type
== VMAT_CONTIGUOUS_DOWN
6229 || memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
6230 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
6232 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6233 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
6235 aggr_type
= vectype
;
6237 /* In case the vectorization factor (VF) is bigger than the number
6238 of elements that we can fit in a vectype (nunits), we have to generate
6239 more than one vector stmt - i.e - we need to "unroll" the
6240 vector stmt by a factor VF/nunits. For more details see documentation in
6241 vect_get_vec_def_for_copy_stmt. */
  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
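  /* Illustrative sketch (made-up values, not generated verbatim): for a
     group of two interleaved V8QI stores a[2*i] = x; a[2*i+1] = y; the two
     vectorized rhs vectors vx = {x0,...,x7} and vy = {y0,...,y7} are
     permuted into

       vlo = VEC_PERM_EXPR <vx, vy, {0, 8, 1, 9, 2, 10, 3, 11}>
           = {x0, y0, x1, y1, x2, y2, x3, y3}
       vhi = VEC_PERM_EXPR <vx, vy, {4, 12, 5, 13, 6, 14, 7, 15}>
           = {x4, y4, x5, y5, x6, y6, x7, y7}

     and stored to &a[0] and &a[8] respectively, which reproduces the
     interleaved element order in memory.  */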
6276 prev_stmt_info
= NULL
;
6277 for (j
= 0; j
< ncopies
; j
++)
6284 /* Get vectorized arguments for SLP_NODE. */
6285 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
,
6288 vec_oprnd
= vec_oprnds
[0];
6292 /* For interleaved stores we collect vectorized defs for all the
6293 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6294 used as an input to vect_permute_store_chain(), and OPRNDS as
6295 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6297 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6298 OPRNDS are of size 1. */
6299 next_stmt
= first_stmt
;
6300 for (i
= 0; i
< group_size
; i
++)
6302 /* Since gaps are not supported for interleaved stores,
6303 GROUP_SIZE is the exact number of stmts in the chain.
6304 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6305 there is no interleaving, GROUP_SIZE is 1, and only one
6306 iteration of the loop will be executed. */
6307 gcc_assert (next_stmt
6308 && gimple_assign_single_p (next_stmt
));
6309 op
= gimple_assign_rhs1 (next_stmt
);
6311 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6312 dr_chain
.quick_push (vec_oprnd
);
6313 oprnds
.quick_push (vec_oprnd
);
6314 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
          /* We should have caught mismatched types earlier.  */
6319 gcc_assert (useless_type_conversion_p (vectype
,
6320 TREE_TYPE (vec_oprnd
)));
6321 bool simd_lane_access_p
6322 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
6323 if (simd_lane_access_p
6324 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
6325 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
6326 && integer_zerop (DR_OFFSET (first_dr
))
6327 && integer_zerop (DR_INIT (first_dr
))
6328 && alias_sets_conflict_p (get_alias_set (aggr_type
),
6329 get_alias_set (TREE_TYPE (ref_type
))))
6331 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
6332 dataref_offset
= build_int_cst (ref_type
, 0);
6337 = vect_create_data_ref_ptr (first_stmt
, aggr_type
,
6338 simd_lane_access_p
? loop
: NULL
,
6339 offset
, &dummy
, gsi
, &ptr_incr
,
6340 simd_lane_access_p
, &inv_p
);
6341 gcc_assert (bb_vinfo
|| !inv_p
);
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.
             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
6352 for (i
= 0; i
< group_size
; i
++)
6355 vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
);
6356 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, op
);
6357 dr_chain
[i
] = vec_oprnd
;
6358 oprnds
[i
] = vec_oprnd
;
6362 = int_const_binop (PLUS_EXPR
, dataref_offset
,
6363 TYPE_SIZE_UNIT (aggr_type
));
6365 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
6366 TYPE_SIZE_UNIT (aggr_type
));
6369 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6373 /* Combine all the vectors into an array. */
6374 vec_array
= create_vector_array (vectype
, vec_num
);
6375 for (i
= 0; i
< vec_num
; i
++)
6377 vec_oprnd
= dr_chain
[i
];
6378 write_vector_array (stmt
, gsi
, vec_oprnd
, vec_array
, i
);
6382 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6383 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
6384 gcall
*call
= gimple_build_call_internal (IFN_STORE_LANES
, 1,
6386 gimple_call_set_lhs (call
, data_ref
);
6387 gimple_call_set_nothrow (call
, true);
6389 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6397 result_chain
.create (group_size
);
6399 vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
6403 next_stmt
= first_stmt
;
6404 for (i
= 0; i
< vec_num
; i
++)
6406 unsigned align
, misalign
;
6409 /* Bump the vector pointer. */
6410 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
6414 vec_oprnd
= vec_oprnds
[i
];
6415 else if (grouped_store
)
6416 /* For grouped stores vectorized defs are interleaved in
6417 vect_permute_store_chain(). */
6418 vec_oprnd
= result_chain
[i
];
6420 data_ref
= fold_build2 (MEM_REF
, vectype
,
6424 : build_int_cst (ref_type
, 0));
6425 align
= DR_TARGET_ALIGNMENT (first_dr
);
6426 if (aligned_access_p (first_dr
))
6428 else if (DR_MISALIGNMENT (first_dr
) == -1)
6430 align
= dr_alignment (vect_dr_behavior (first_dr
));
6432 TREE_TYPE (data_ref
)
6433 = build_aligned_type (TREE_TYPE (data_ref
),
6434 align
* BITS_PER_UNIT
);
6438 TREE_TYPE (data_ref
)
6439 = build_aligned_type (TREE_TYPE (data_ref
),
6440 TYPE_ALIGN (elem_type
));
6441 misalign
= DR_MISALIGNMENT (first_dr
);
6443 if (dataref_offset
== NULL_TREE
6444 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
6445 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
6448 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
6450 tree perm_mask
= perm_mask_for_reverse (vectype
);
6452 = vect_create_destination_var (gimple_assign_rhs1 (stmt
),
6454 tree new_temp
= make_ssa_name (perm_dest
);
6456 /* Generate the permute statement. */
6458 = gimple_build_assign (new_temp
, VEC_PERM_EXPR
, vec_oprnd
,
6459 vec_oprnd
, perm_mask
);
6460 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
6462 perm_stmt
= SSA_NAME_DEF_STMT (new_temp
);
6463 vec_oprnd
= new_temp
;
6466 /* Arguments are ready. Create the new vector stmt. */
6467 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
6468 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6473 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6481 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6483 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6484 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6489 result_chain
.release ();
6490 vec_oprnds
.release ();
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, vec_perm_indices sel)
{
  tree mask_elt_type, mask_type, mask_vec;

  unsigned int nunits = sel.length ();
  gcc_checking_assert (nunits == TYPE_VECTOR_SUBPARTS (vectype));

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  auto_vec<tree, 32> mask_elts (nunits);
  for (unsigned int i = 0; i < nunits; ++i)
    mask_elts.quick_push (build_int_cst (mask_elt_type, sel[i]));
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, vec_perm_indices sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, &sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
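/* Illustrative usage sketch (mirrors perm_mask_for_reverse elsewhere in this
   file; not a new API): building a mask that reverses the elements of
   VECTYPE, assuming the target supports such a permutation:

     unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
     auto_vec_perm_indices sel (nunits);
     for (unsigned int i = 0; i < nunits; ++i)
       sel.quick_push (nunits - 1 - i);
     tree mask = vect_gen_perm_mask_checked (vectype, sel);  */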
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we were
   successful in doing so (and thus STMT can then be moved), otherwise
   returns false.  */
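/* Illustrative sketch (mirrors the invariant-load handling in
   vectorizable_load below; not a separate API): the caller only moves STMT
   once the definitions of its operands could be hoisted first, e.g.

     if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
         && !nested_in_vect_loop
         && hoist_defs_of_uses (stmt, loop))
       ... insert the scalar load on the loop preheader edge ...  */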
6559 hoist_defs_of_uses (gimple
*stmt
, struct loop
*loop
)
6565 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
6567 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
6568 if (!gimple_nop_p (def_stmt
)
6569 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases, when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
6577 if (gimple_code (def_stmt
) == GIMPLE_PHI
)
6579 FOR_EACH_SSA_TREE_OPERAND (op2
, def_stmt
, i2
, SSA_OP_USE
)
6581 gimple
*def_stmt2
= SSA_NAME_DEF_STMT (op2
);
6582 if (!gimple_nop_p (def_stmt2
)
6583 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt2
)))
6593 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
6595 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
6596 if (!gimple_nop_p (def_stmt
)
6597 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
6599 gimple_stmt_iterator gsi
= gsi_for_stmt (def_stmt
);
6600 gsi_remove (&gsi
, false);
6601 gsi_insert_on_edge_immediate (loop_preheader_edge (loop
), def_stmt
);
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
6617 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6618 slp_tree slp_node
, slp_instance slp_node_instance
)
6621 tree vec_dest
= NULL
;
6622 tree data_ref
= NULL
;
6623 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6624 stmt_vec_info prev_stmt_info
;
6625 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6626 struct loop
*loop
= NULL
;
6627 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
6628 bool nested_in_vect_loop
= false;
6629 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6633 gimple
*new_stmt
= NULL
;
6635 enum dr_alignment_support alignment_support_scheme
;
6636 tree dataref_ptr
= NULL_TREE
;
6637 tree dataref_offset
= NULL_TREE
;
6638 gimple
*ptr_incr
= NULL
;
6640 int i
, j
, group_size
, group_gap_adj
;
6641 tree msq
= NULL_TREE
, lsq
;
6642 tree offset
= NULL_TREE
;
6643 tree byte_offset
= NULL_TREE
;
6644 tree realignment_token
= NULL_TREE
;
6646 vec
<tree
> dr_chain
= vNULL
;
6647 bool grouped_load
= false;
6649 gimple
*first_stmt_for_drptr
= NULL
;
6651 bool compute_in_loop
= false;
6652 struct loop
*at_loop
;
6654 bool slp
= (slp_node
!= NULL
);
6655 bool slp_perm
= false;
6656 enum tree_code code
;
6657 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6660 gather_scatter_info gs_info
;
6661 vec_info
*vinfo
= stmt_info
->vinfo
;
6664 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6667 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
6671 /* Is vectorizable load? */
6672 if (!is_gimple_assign (stmt
))
6675 scalar_dest
= gimple_assign_lhs (stmt
);
6676 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6679 code
= gimple_assign_rhs_code (stmt
);
6680 if (code
!= ARRAY_REF
6681 && code
!= BIT_FIELD_REF
6682 && code
!= INDIRECT_REF
6683 && code
!= COMPONENT_REF
6684 && code
!= IMAGPART_EXPR
6685 && code
!= REALPART_EXPR
6687 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6690 if (!STMT_VINFO_DATA_REF (stmt_info
))
6693 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6694 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6698 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6699 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6700 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
6711 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
6713 gcc_assert (ncopies
>= 1);
6715 /* FORNOW. This restriction should be relaxed. */
6716 if (nested_in_vect_loop
&& ncopies
> 1)
6718 if (dump_enabled_p ())
6719 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6720 "multiple types in nested loop.\n");
6724 /* Invalidate assumptions made by dependence analysis when vectorization
6725 on the unrolled body effectively re-orders stmts. */
6727 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6728 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6729 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6731 if (dump_enabled_p ())
6732 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6733 "cannot perform implicit CSE when unrolling "
6734 "with negative dependence distance\n");
6738 elem_type
= TREE_TYPE (vectype
);
6739 mode
= TYPE_MODE (vectype
);
6741 /* FORNOW. In some cases can vectorize even if data-type not supported
6742 (e.g. - data copies). */
6743 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
6745 if (dump_enabled_p ())
6746 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6747 "Aligned load, but unsupported type.\n");
6751 /* Check if the load is a part of an interleaving chain. */
6752 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6754 grouped_load
= true;
6756 gcc_assert (!nested_in_vect_loop
);
6757 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
6759 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6760 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6762 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
6765 /* Invalidate assumptions made by dependence analysis when vectorization
6766 on the unrolled body effectively re-orders stmts. */
6767 if (!PURE_SLP_STMT (stmt_info
)
6768 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
6769 && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo
)
6770 > STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
6772 if (dump_enabled_p ())
6773 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6774 "cannot perform implicit CSE when performing "
6775 "group loads with negative dependence distance\n");
6779 /* Similarly when the stmt is a load that is both part of a SLP
6780 instance and a loop vectorized stmt via the same-dr mechanism
6781 we have to give up. */
6782 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
6783 && (STMT_SLP_TYPE (stmt_info
)
6784 != STMT_SLP_TYPE (vinfo_for_stmt
6785 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
6787 if (dump_enabled_p ())
6788 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6789 "conflicting SLP types for CSEd load\n");
6794 vect_memory_access_type memory_access_type
;
6795 if (!get_load_store_type (stmt
, vectype
, slp
, VLS_LOAD
, ncopies
,
6796 &memory_access_type
, &gs_info
))
6799 if (!vec_stmt
) /* transformation not required. */
6802 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
6803 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
6804 /* The SLP costs are calculated during SLP analysis. */
6805 if (!PURE_SLP_STMT (stmt_info
))
6806 vect_model_load_cost (stmt_info
, ncopies
, memory_access_type
,
6812 gcc_assert (memory_access_type
6813 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
6815 if (dump_enabled_p ())
6816 dump_printf_loc (MSG_NOTE
, vect_location
,
6817 "transform load. ncopies = %d\n", ncopies
);
6821 ensure_base_align (dr
);
6823 if (memory_access_type
== VMAT_GATHER_SCATTER
)
6825 tree vec_oprnd0
= NULL_TREE
, op
;
6826 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
6827 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
6828 tree ptr
, mask
, var
, scale
, merge
, perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
6829 edge pe
= loop_preheader_edge (loop
);
6832 enum { NARROW
, NONE
, WIDEN
} modifier
;
6833 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
6835 if (nunits
== gather_off_nunits
)
6837 else if (nunits
== gather_off_nunits
/ 2)
6841 auto_vec_perm_indices
sel (gather_off_nunits
);
6842 for (i
= 0; i
< gather_off_nunits
; ++i
)
6843 sel
.quick_push (i
| nunits
);
6845 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
, sel
);
6847 else if (nunits
== gather_off_nunits
* 2)
6851 auto_vec_perm_indices
sel (nunits
);
6852 for (i
= 0; i
< nunits
; ++i
)
6853 sel
.quick_push (i
< gather_off_nunits
6854 ? i
: i
+ nunits
- gather_off_nunits
);
6856 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
6862 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
6863 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6864 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6865 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6866 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6867 scaletype
= TREE_VALUE (arglist
);
6868 gcc_checking_assert (types_compatible_p (srctype
, rettype
));
6870 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
6872 ptr
= fold_convert (ptrtype
, gs_info
.base
);
6873 if (!is_gimple_min_invariant (ptr
))
6875 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
6876 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
6877 gcc_assert (!new_bb
);
6880 /* Currently we support only unconditional gather loads,
6881 so mask should be all ones. */
6882 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
6883 mask
= build_int_cst (masktype
, -1);
6884 else if (TREE_CODE (TREE_TYPE (masktype
)) == INTEGER_TYPE
)
6886 mask
= build_int_cst (TREE_TYPE (masktype
), -1);
6887 mask
= build_vector_from_val (masktype
, mask
);
6888 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6890 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype
)))
6894 for (j
= 0; j
< 6; ++j
)
6896 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (masktype
)));
6897 mask
= build_real (TREE_TYPE (masktype
), r
);
6898 mask
= build_vector_from_val (masktype
, mask
);
6899 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6904 scale
= build_int_cst (scaletype
, gs_info
.scale
);
6906 if (TREE_CODE (TREE_TYPE (rettype
)) == INTEGER_TYPE
)
6907 merge
= build_int_cst (TREE_TYPE (rettype
), 0);
6908 else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype
)))
6912 for (j
= 0; j
< 6; ++j
)
6914 real_from_target (&r
, tmp
, TYPE_MODE (TREE_TYPE (rettype
)));
6915 merge
= build_real (TREE_TYPE (rettype
), r
);
6919 merge
= build_vector_from_val (rettype
, merge
);
6920 merge
= vect_init_vector (stmt
, merge
, rettype
, NULL
);
6922 prev_stmt_info
= NULL
;
6923 for (j
= 0; j
< ncopies
; ++j
)
6925 if (modifier
== WIDEN
&& (j
& 1))
6926 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
6927 perm_mask
, stmt
, gsi
);
6930 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
6933 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
, vec_oprnd0
);
6935 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
6937 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
6938 == TYPE_VECTOR_SUBPARTS (idxtype
));
6939 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
6940 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
6942 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6943 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6948 = gimple_build_call (gs_info
.decl
, 5, merge
, ptr
, op
, mask
, scale
);
6950 if (!useless_type_conversion_p (vectype
, rettype
))
6952 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
6953 == TYPE_VECTOR_SUBPARTS (rettype
));
6954 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
6955 gimple_call_set_lhs (new_stmt
, op
);
6956 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6957 var
= make_ssa_name (vec_dest
);
6958 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
6960 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6964 var
= make_ssa_name (vec_dest
, new_stmt
);
6965 gimple_call_set_lhs (new_stmt
, var
);
6968 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6970 if (modifier
== NARROW
)
6977 var
= permute_vec_elements (prev_res
, var
,
6978 perm_mask
, stmt
, gsi
);
6979 new_stmt
= SSA_NAME_DEF_STMT (var
);
6982 if (prev_stmt_info
== NULL
)
6983 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6985 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6986 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6991 if (memory_access_type
== VMAT_ELEMENTWISE
6992 || memory_access_type
== VMAT_STRIDED_SLP
)
6994 gimple_stmt_iterator incr_gsi
;
7000 vec
<constructor_elt
, va_gc
> *v
= NULL
;
7001 gimple_seq stmts
= NULL
;
7002 tree stride_base
, stride_step
, alias_off
;
7004 gcc_assert (!nested_in_vect_loop
);
7006 if (slp
&& grouped_load
)
7008 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7009 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7010 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7011 ref_type
= get_group_alias_ptr_type (first_stmt
);
7018 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
7022 = fold_build_pointer_plus
7023 (DR_BASE_ADDRESS (first_dr
),
7024 size_binop (PLUS_EXPR
,
7025 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
7026 convert_to_ptrofftype (DR_INIT (first_dr
))));
7027 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
         */
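      /* Illustrative sketch (not generated verbatim): with a V4SI vectype
         and VF == 4 each vector iteration issues the four scalar loads
         array[j], array[j + stride], array[j + 2*stride] and
         array[j + 3*stride], combines them into one V4SI value with a
         CONSTRUCTOR, and advances the new induction variable by 4*stride.  */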
7045 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (stride_step
), stride_step
,
7046 build_int_cst (TREE_TYPE (stride_step
), vf
));
7048 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
7050 create_iv (unshare_expr (stride_base
), unshare_expr (ivstep
), NULL
,
7051 loop
, &incr_gsi
, insert_after
,
7053 incr
= gsi_stmt (incr_gsi
);
7054 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
7056 stride_step
= force_gimple_operand (unshare_expr (stride_step
),
7057 &stmts
, true, NULL_TREE
);
7059 gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop
), stmts
);
7061 prev_stmt_info
= NULL
;
7062 running_off
= offvar
;
7063 alias_off
= build_int_cst (ref_type
, 0);
7064 int nloads
= nunits
;
7066 tree ltype
= TREE_TYPE (vectype
);
7067 tree lvectype
= vectype
;
7068 auto_vec
<tree
> dr_chain
;
7069 if (memory_access_type
== VMAT_STRIDED_SLP
)
7071 if (group_size
< nunits
)
7073 /* First check if vec_init optab supports construction from
7074 vector elts directly. */
7075 scalar_mode elmode
= SCALAR_TYPE_MODE (TREE_TYPE (vectype
));
7077 if (mode_for_vector (elmode
, group_size
).exists (&vmode
)
7078 && VECTOR_MODE_P (vmode
)
7079 && (convert_optab_handler (vec_init_optab
,
7080 TYPE_MODE (vectype
), vmode
)
7081 != CODE_FOR_nothing
))
7083 nloads
= nunits
/ group_size
;
7085 ltype
= build_vector_type (TREE_TYPE (vectype
), group_size
);
7089 /* Otherwise avoid emitting a constructor of vector elements
7090 by performing the loads using an integer type of the same
7091 size, constructing a vector of those and then
7092 re-interpreting it as the original vector type.
7093 This avoids a huge runtime penalty due to the general
7094 inability to perform store forwarding from smaller stores
7095 to a larger load. */
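                  /* Illustrative sketch (made-up types, not from the code
                     below): with a V8HI vectype and an interleaved group of
                     two 16-bit elements, each group is loaded as one 32-bit
                     integer, the four integers are combined into a V4SI
                     CONSTRUCTOR, and the result is VIEW_CONVERTed back to
                     V8HI, instead of building the vector from eight
                     separate HImode element loads.  */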
                  unsigned lsize
                    = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
7098 elmode
= int_mode_for_size (lsize
, 0).require ();
7099 /* If we can't construct such a vector fall back to
7100 element loads of the original vector type. */
7101 if (mode_for_vector (elmode
,
7102 nunits
/ group_size
).exists (&vmode
)
7103 && VECTOR_MODE_P (vmode
)
7104 && (convert_optab_handler (vec_init_optab
, vmode
, elmode
)
7105 != CODE_FOR_nothing
))
7107 nloads
= nunits
/ group_size
;
7109 ltype
= build_nonstandard_integer_type (lsize
, 1);
7110 lvectype
= build_vector_type (ltype
, nloads
);
7120 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (TREE_TYPE (vectype
)));
              /* For SLP permutation support we need to load the whole group,
                 not only the number of vector stmts the permutation result
                 fits in.  */
7129 ncopies
= (group_size
* vf
+ nunits
- 1) / nunits
;
7130 dr_chain
.create (ncopies
);
7133 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7136 unsigned HOST_WIDE_INT
7137 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
7138 for (j
= 0; j
< ncopies
; j
++)
7141 vec_alloc (v
, nloads
);
7142 for (i
= 0; i
< nloads
; i
++)
7144 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
7146 new_stmt
= gimple_build_assign (make_ssa_name (ltype
),
7147 build2 (MEM_REF
, ltype
,
7148 running_off
, this_off
));
7149 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7151 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
,
7152 gimple_assign_lhs (new_stmt
));
7156 || group_el
== group_size
)
7158 tree newoff
= copy_ssa_name (running_off
);
7159 gimple
*incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
7160 running_off
, stride_step
);
7161 vect_finish_stmt_generation (stmt
, incr
, gsi
);
7163 running_off
= newoff
;
7169 tree vec_inv
= build_constructor (lvectype
, v
);
7170 new_temp
= vect_init_vector (stmt
, vec_inv
, lvectype
, gsi
);
7171 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7172 if (lvectype
!= vectype
)
7174 new_stmt
= gimple_build_assign (make_ssa_name (vectype
),
7176 build1 (VIEW_CONVERT_EXPR
,
7177 vectype
, new_temp
));
7178 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7185 dr_chain
.quick_push (gimple_assign_lhs (new_stmt
));
7187 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7192 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7194 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7195 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7201 vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7202 slp_node_instance
, false, &n_perms
);
7209 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7210 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7211 /* For SLP vectorization we directly vectorize a subchain
7212 without permutation. */
7213 if (slp
&& ! SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
7214 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7215 /* For BB vectorization always use the first stmt to base
7216 the data ref pointer on. */
7218 first_stmt_for_drptr
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7220 /* Check if the chain of loads is already vectorized. */
7221 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt
))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
7232 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7235 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7238 /* VEC_NUM is the number of vect stmts to be created for this group. */
7241 grouped_load
= false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
7247 vec_num
= (group_size
* vf
+ nunits
- 1) / nunits
;
7248 group_gap_adj
= vf
* group_size
- nunits
* vec_num
;
7252 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7254 = group_size
- SLP_INSTANCE_GROUP_SIZE (slp_node_instance
);
7258 vec_num
= group_size
;
7260 ref_type
= get_group_alias_ptr_type (first_stmt
);
7266 group_size
= vec_num
= 1;
7268 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
7271 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
7272 gcc_assert (alignment_support_scheme
);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
7275 gcc_assert (memory_access_type
!= VMAT_LOAD_STORE_LANES
7276 || alignment_support_scheme
== dr_aligned
7277 || alignment_support_scheme
== dr_unaligned_supported
);
7279 /* In case the vectorization factor (VF) is bigger than the number
7280 of elements that we can fit in a vectype (nunits), we have to generate
7281 more than one vector stmt - i.e - we need to "unroll" the
7282 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7283 from one copy of the vector stmt to the next, in the field
7284 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7285 stages to find the correct vector defs to be used when vectorizing
7286 stmts that use the defs of the current stmt. The example below
7287 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7288 need to create 4 vectorized stmts):
7290 before vectorization:
7291 RELATED_STMT VEC_STMT
7295 step 1: vectorize stmt S1:
7296 We first create the vector stmt VS1_0, and, as usual, record a
7297 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7298 Next, we create the vector stmt VS1_1, and record a pointer to
7299 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
7300 Similarly, for VS1_2 and VS1_3. This is the resulting chain of
7302 RELATED_STMT VEC_STMT
7303 VS1_0: vx0 = memref0 VS1_1 -
7304 VS1_1: vx1 = memref1 VS1_2 -
7305 VS1_2: vx2 = memref2 VS1_3 -
7306 VS1_3: vx3 = memref3 - -
7307 S1: x = load - VS1_0
     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information we recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */
7314 /* In case of interleaving (non-unit grouped access):
7321 Vectorized loads are created in the order of memory accesses
7322 starting from the access of the first stmt of the chain:
7325 VS2: vx1 = &base + vec_size*1
7326 VS3: vx3 = &base + vec_size*2
7327 VS4: vx4 = &base + vec_size*3
7329 Then permutation statements are generated:
7331 VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
7332 VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
7335 And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
7336 (the order of the data-refs in the output of vect_permute_load_chain
7337 corresponds to the order of scalar stmts in the interleaving chain - see
7338 the documentation of vect_permute_load_chain()).
7339 The generation of permutation stmts and recording them in
7340 STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().
7342 In case of both multiple types and interleaving, the vector loads and
7343 permutation stmts above are created for every copy. The result vector
7344 stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
7345 corresponding STMT_VINFO_RELATED_STMT for the next copies. */
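  /* Illustrative sketch (made-up values, not generated verbatim): for two
     interleaved V4SI loads x = a[2*i]; y = a[2*i+1]; the group is loaded as
     vx0 = {a[0],a[1],a[2],a[3]} and vx1 = {a[4],a[5],a[6],a[7]}, and the
     extract-even/extract-odd permutations

       vx5 = VEC_PERM_EXPR <vx0, vx1, {0, 2, 4, 6}> = {a[0],a[2],a[4],a[6]}
       vx6 = VEC_PERM_EXPR <vx0, vx1, {1, 3, 5, 7}> = {a[1],a[3],a[5],a[7]}

     recover the vector of x values and the vector of y values
     respectively.  */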
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */
7376 /* If the misalignment remains the same throughout the execution of the
7377 loop, we can create the init_addr and permutation mask at the loop
7378 preheader. Otherwise, it needs to be created inside the loop.
7379 This can only occur when vectorizing memory accesses in the inner-loop
7380 nested within an outer-loop that is being vectorized. */
7382 if (nested_in_vect_loop
7383 && (DR_STEP_ALIGNMENT (dr
) % GET_MODE_SIZE (TYPE_MODE (vectype
))) != 0)
7385 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
7386 compute_in_loop
= true;
7389 if ((alignment_support_scheme
== dr_explicit_realign_optimized
7390 || alignment_support_scheme
== dr_explicit_realign
)
7391 && !compute_in_loop
)
7393 msq
= vect_setup_realignment (first_stmt
, gsi
, &realignment_token
,
7394 alignment_support_scheme
, NULL_TREE
,
7396 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7398 phi
= as_a
<gphi
*> (SSA_NAME_DEF_STMT (msq
));
7399 byte_offset
= size_binop (MINUS_EXPR
, TYPE_SIZE_UNIT (vectype
),
7406 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7407 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
7409 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7410 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
7412 aggr_type
= vectype
;
7414 prev_stmt_info
= NULL
;
7416 for (j
= 0; j
< ncopies
; j
++)
7418 /* 1. Create the vector or array pointer update chain. */
7421 bool simd_lane_access_p
7422 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
7423 if (simd_lane_access_p
7424 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
7425 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
7426 && integer_zerop (DR_OFFSET (first_dr
))
7427 && integer_zerop (DR_INIT (first_dr
))
7428 && alias_sets_conflict_p (get_alias_set (aggr_type
),
7429 get_alias_set (TREE_TYPE (ref_type
)))
7430 && (alignment_support_scheme
== dr_aligned
7431 || alignment_support_scheme
== dr_unaligned_supported
))
7433 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
7434 dataref_offset
= build_int_cst (ref_type
, 0);
7437 else if (first_stmt_for_drptr
7438 && first_stmt
!= first_stmt_for_drptr
)
7441 = vect_create_data_ref_ptr (first_stmt_for_drptr
, aggr_type
,
7442 at_loop
, offset
, &dummy
, gsi
,
7443 &ptr_incr
, simd_lane_access_p
,
7444 &inv_p
, byte_offset
);
7445 /* Adjust the pointer by the difference to first_stmt. */
7446 data_reference_p ptrdr
7447 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr
));
7448 tree diff
= fold_convert (sizetype
,
7449 size_binop (MINUS_EXPR
,
7452 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7457 = vect_create_data_ref_ptr (first_stmt
, aggr_type
, at_loop
,
7458 offset
, &dummy
, gsi
, &ptr_incr
,
7459 simd_lane_access_p
, &inv_p
,
7462 else if (dataref_offset
)
7463 dataref_offset
= int_const_binop (PLUS_EXPR
, dataref_offset
,
7464 TYPE_SIZE_UNIT (aggr_type
));
7466 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
7467 TYPE_SIZE_UNIT (aggr_type
));
7469 if (grouped_load
|| slp_perm
)
7470 dr_chain
.create (vec_num
);
7472 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7476 vec_array
= create_vector_array (vectype
, vec_num
);
7479 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
7480 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
7481 gcall
*call
= gimple_build_call_internal (IFN_LOAD_LANES
, 1,
7483 gimple_call_set_lhs (call
, vec_array
);
7484 gimple_call_set_nothrow (call
, true);
7486 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7488 /* Extract each vector into an SSA_NAME. */
7489 for (i
= 0; i
< vec_num
; i
++)
7491 new_temp
= read_vector_array (stmt
, gsi
, scalar_dest
,
7493 dr_chain
.quick_push (new_temp
);
7496 /* Record the mapping between SSA_NAMEs and statements. */
7497 vect_record_grouped_load_vectors (stmt
, dr_chain
);
7501 for (i
= 0; i
< vec_num
; i
++)
7504 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7507 /* 2. Create the vector-load in the loop. */
7508 switch (alignment_support_scheme
)
7511 case dr_unaligned_supported
:
7513 unsigned int align
, misalign
;
7516 = fold_build2 (MEM_REF
, vectype
, dataref_ptr
,
7519 : build_int_cst (ref_type
, 0));
7520 align
= DR_TARGET_ALIGNMENT (dr
);
7521 if (alignment_support_scheme
== dr_aligned
)
7523 gcc_assert (aligned_access_p (first_dr
));
7526 else if (DR_MISALIGNMENT (first_dr
) == -1)
7528 align
= dr_alignment (vect_dr_behavior (first_dr
));
7530 TREE_TYPE (data_ref
)
7531 = build_aligned_type (TREE_TYPE (data_ref
),
7532 align
* BITS_PER_UNIT
);
7536 TREE_TYPE (data_ref
)
7537 = build_aligned_type (TREE_TYPE (data_ref
),
7538 TYPE_ALIGN (elem_type
));
7539 misalign
= DR_MISALIGNMENT (first_dr
);
7541 if (dataref_offset
== NULL_TREE
7542 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
7543 set_ptr_info_alignment (get_ptr_info (dataref_ptr
),
7547 case dr_explicit_realign
:
7551 tree vs
= size_int (TYPE_VECTOR_SUBPARTS (vectype
));
7553 if (compute_in_loop
)
7554 msq
= vect_setup_realignment (first_stmt
, gsi
,
7556 dr_explicit_realign
,
7559 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
7560 ptr
= copy_ssa_name (dataref_ptr
);
7562 ptr
= make_ssa_name (TREE_TYPE (dataref_ptr
));
7563 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
7564 new_stmt
= gimple_build_assign
7565 (ptr
, BIT_AND_EXPR
, dataref_ptr
,
7567 (TREE_TYPE (dataref_ptr
),
7568 -(HOST_WIDE_INT
) align
));
7569 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7571 = build2 (MEM_REF
, vectype
, ptr
,
7572 build_int_cst (ref_type
, 0));
7573 vec_dest
= vect_create_destination_var (scalar_dest
,
7575 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
7576 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7577 gimple_assign_set_lhs (new_stmt
, new_temp
);
7578 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
7579 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
7580 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7583 bump
= size_binop (MULT_EXPR
, vs
,
7584 TYPE_SIZE_UNIT (elem_type
));
7585 bump
= size_binop (MINUS_EXPR
, bump
, size_one_node
);
7586 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
, stmt
, bump
);
7587 new_stmt
= gimple_build_assign
7588 (NULL_TREE
, BIT_AND_EXPR
, ptr
,
7590 (TREE_TYPE (ptr
), -(HOST_WIDE_INT
) align
));
7591 ptr
= copy_ssa_name (ptr
, new_stmt
);
7592 gimple_assign_set_lhs (new_stmt
, ptr
);
7593 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7595 = build2 (MEM_REF
, vectype
, ptr
,
7596 build_int_cst (ref_type
, 0));
7599 case dr_explicit_realign_optimized
:
7601 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
7602 new_temp
= copy_ssa_name (dataref_ptr
);
7604 new_temp
= make_ssa_name (TREE_TYPE (dataref_ptr
));
7605 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
7606 new_stmt
= gimple_build_assign
7607 (new_temp
, BIT_AND_EXPR
, dataref_ptr
,
7608 build_int_cst (TREE_TYPE (dataref_ptr
),
7609 -(HOST_WIDE_INT
) align
));
7610 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7612 = build2 (MEM_REF
, vectype
, new_temp
,
7613 build_int_cst (ref_type
, 0));
7619 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7620 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
7621 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7622 gimple_assign_set_lhs (new_stmt
, new_temp
);
7623 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7625 /* 3. Handle explicit realignment if necessary/supported.
7627 vec_dest = realign_load (msq, lsq, realignment_token) */
7628 if (alignment_support_scheme
== dr_explicit_realign_optimized
7629 || alignment_support_scheme
== dr_explicit_realign
)
7631 lsq
= gimple_assign_lhs (new_stmt
);
7632 if (!realignment_token
)
7633 realignment_token
= dataref_ptr
;
7634 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7635 new_stmt
= gimple_build_assign (vec_dest
, REALIGN_LOAD_EXPR
,
7636 msq
, lsq
, realignment_token
);
7637 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
7638 gimple_assign_set_lhs (new_stmt
, new_temp
);
7639 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7641 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7644 if (i
== vec_num
- 1 && j
== ncopies
- 1)
7645 add_phi_arg (phi
, lsq
,
7646 loop_latch_edge (containing_loop
),
7652 /* 4. Handle invariant-load. */
7653 if (inv_p
&& !bb_vinfo
)
7655 gcc_assert (!grouped_load
);
7656 /* If we have versioned for aliasing or the loop doesn't
7657 have any data dependencies that would preclude this,
7658 then we are sure this is a loop invariant load and
7659 thus we can insert it on the preheader edge. */
7660 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo
)
7661 && !nested_in_vect_loop
7662 && hoist_defs_of_uses (stmt
, loop
))
7664 if (dump_enabled_p ())
7666 dump_printf_loc (MSG_NOTE
, vect_location
,
7667 "hoisting out of the vectorized "
7669 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
7671 tree tem
= copy_ssa_name (scalar_dest
);
7672 gsi_insert_on_edge_immediate
7673 (loop_preheader_edge (loop
),
7674 gimple_build_assign (tem
,
7676 (gimple_assign_rhs1 (stmt
))));
7677 new_temp
= vect_init_vector (stmt
, tem
, vectype
, NULL
);
7678 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7679 set_vinfo_for_stmt (new_stmt
,
7680 new_stmt_vec_info (new_stmt
, vinfo
));
7684 gimple_stmt_iterator gsi2
= *gsi
;
7686 new_temp
= vect_init_vector (stmt
, scalar_dest
,
7688 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7692 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7694 tree perm_mask
= perm_mask_for_reverse (vectype
);
7695 new_temp
= permute_vec_elements (new_temp
, new_temp
,
7696 perm_mask
, stmt
, gsi
);
7697 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7700 /* Collect vector loads and later create their permutation in
7701 vect_transform_grouped_load (). */
7702 if (grouped_load
|| slp_perm
)
7703 dr_chain
.quick_push (new_temp
);
7705 /* Store vector loads in the corresponding SLP_NODE. */
7706 if (slp
&& !slp_perm
)
7707 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
          /* With an SLP permutation we load the gaps as well; without one
             we need to skip the gaps after we manage to fully load
             all elements.  group_gap_adj is GROUP_SIZE here.  */
7712 group_elt
+= nunits
;
7713 if (group_gap_adj
!= 0 && ! slp_perm
7714 && group_elt
== group_size
- group_gap_adj
)
7718 = wide_int_to_tree (sizetype
,
7719 wi::smul (TYPE_SIZE_UNIT (elem_type
),
7720 group_gap_adj
, &ovf
));
7721 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7726 /* Bump the vector pointer to account for a gap or for excess
7727 elements loaded for a permuted SLP load. */
7728 if (group_gap_adj
!= 0 && slp_perm
)
7732 = wide_int_to_tree (sizetype
,
7733 wi::smul (TYPE_SIZE_UNIT (elem_type
),
7734 group_gap_adj
, &ovf
));
7735 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7740 if (slp
&& !slp_perm
)
7746 if (!vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7747 slp_node_instance
, false,
7750 dr_chain
.release ();
7758 if (memory_access_type
!= VMAT_LOAD_STORE_LANES
)
7759 vect_transform_grouped_load (stmt
, dr_chain
, group_size
, gsi
);
7760 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7765 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7767 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7768 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7771 dr_chain
.release ();
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
7791 vect_is_simple_cond (tree cond
, vec_info
*vinfo
,
7792 tree
*comp_vectype
, enum vect_def_type
*dts
)
7795 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
7798 if (TREE_CODE (cond
) == SSA_NAME
7799 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond
)))
7801 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (cond
);
7802 if (!vect_is_simple_use (cond
, vinfo
, &lhs_def_stmt
,
7803 &dts
[0], comp_vectype
)
7805 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype
))
7810 if (!COMPARISON_CLASS_P (cond
))
7813 lhs
= TREE_OPERAND (cond
, 0);
7814 rhs
= TREE_OPERAND (cond
, 1);
7816 if (TREE_CODE (lhs
) == SSA_NAME
)
7818 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (lhs
);
7819 if (!vect_is_simple_use (lhs
, vinfo
, &lhs_def_stmt
, &dts
[0], &vectype1
))
7822 else if (TREE_CODE (lhs
) == INTEGER_CST
|| TREE_CODE (lhs
) == REAL_CST
7823 || TREE_CODE (lhs
) == FIXED_CST
)
7824 dts
[0] = vect_constant_def
;
7828 if (TREE_CODE (rhs
) == SSA_NAME
)
7830 gimple
*rhs_def_stmt
= SSA_NAME_DEF_STMT (rhs
);
7831 if (!vect_is_simple_use (rhs
, vinfo
, &rhs_def_stmt
, &dts
[1], &vectype2
))
7834 else if (TREE_CODE (rhs
) == INTEGER_CST
|| TREE_CODE (rhs
) == REAL_CST
7835 || TREE_CODE (rhs
) == FIXED_CST
)
7836 dts
[1] = vect_constant_def
;
7840 if (vectype1
&& vectype2
7841 && TYPE_VECTOR_SUBPARTS (vectype1
) != TYPE_VECTOR_SUBPARTS (vectype2
))
7844 *comp_vectype
= vectype1
? vectype1
: vectype2
;
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
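/* Illustrative example (made-up SSA names, not generated verbatim): a scalar
   statement

     x_1 = a_2 < b_3 ? c_4 : d_5;

   is vectorized (assuming V4SI operands and a supported vector comparison) as

     mask_6 = vect_a_7 < vect_b_8;
     vect_x_9 = VEC_COND_EXPR <mask_6, vect_c_10, vect_d_11>;  */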
7862 vectorizable_condition (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
7863 gimple
**vec_stmt
, tree reduc_def
, int reduc_index
,
7866 tree scalar_dest
= NULL_TREE
;
7867 tree vec_dest
= NULL_TREE
;
7868 tree cond_expr
, cond_expr0
= NULL_TREE
, cond_expr1
= NULL_TREE
;
7869 tree then_clause
, else_clause
;
7870 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7871 tree comp_vectype
= NULL_TREE
;
7872 tree vec_cond_lhs
= NULL_TREE
, vec_cond_rhs
= NULL_TREE
;
7873 tree vec_then_clause
= NULL_TREE
, vec_else_clause
= NULL_TREE
;
7876 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
7877 enum vect_def_type dts
[4]
7878 = {vect_unknown_def_type
, vect_unknown_def_type
,
7879 vect_unknown_def_type
, vect_unknown_def_type
};
7882 enum tree_code code
, cond_code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
7883 stmt_vec_info prev_stmt_info
= NULL
;
7885 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
7886 vec
<tree
> vec_oprnds0
= vNULL
;
7887 vec
<tree
> vec_oprnds1
= vNULL
;
7888 vec
<tree
> vec_oprnds2
= vNULL
;
7889 vec
<tree
> vec_oprnds3
= vNULL
;
7891 bool masked
= false;
7893 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
7896 if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
) == TREE_CODE_REDUCTION
)
7898 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
7901 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
7902 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
7906 /* FORNOW: not yet supported. */
7907 if (STMT_VINFO_LIVE_P (stmt_info
))
7909 if (dump_enabled_p ())
7910 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7911 "value used after loop.\n");
7916 /* Is vectorizable conditional operation? */
7917 if (!is_gimple_assign (stmt
))
7920 code
= gimple_assign_rhs_code (stmt
);
7922 if (code
!= COND_EXPR
)
7925 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
7926 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
7931 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
7933 gcc_assert (ncopies
>= 1);
7934 if (reduc_index
&& ncopies
> 1)
7935 return false; /* FORNOW */
7937 cond_expr
= gimple_assign_rhs1 (stmt
);
7938 then_clause
= gimple_assign_rhs2 (stmt
);
7939 else_clause
= gimple_assign_rhs3 (stmt
);
7941 if (!vect_is_simple_cond (cond_expr
, stmt_info
->vinfo
,
7942 &comp_vectype
, &dts
[0])
7947 if (!vect_is_simple_use (then_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[2],
7950 if (!vect_is_simple_use (else_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[3],
7954 if (vectype1
&& !useless_type_conversion_p (vectype
, vectype1
))
7957 if (vectype2
&& !useless_type_conversion_p (vectype
, vectype2
))
7960 masked
= !COMPARISON_CLASS_P (cond_expr
);
7961 vec_cmp_type
= build_same_sized_truth_vector_type (comp_vectype
);
7963 if (vec_cmp_type
== NULL_TREE
)
7966 cond_code
= TREE_CODE (cond_expr
);
7969 cond_expr0
= TREE_OPERAND (cond_expr
, 0);
7970 cond_expr1
= TREE_OPERAND (cond_expr
, 1);
7973 if (!masked
&& VECTOR_BOOLEAN_TYPE_P (comp_vectype
))
7975 /* Boolean values may have another representation in vectors
7976 and therefore we prefer bit operations over comparison for
7977 them (which also works for scalar masks). We store opcodes
7978 to use in bitop1 and bitop2. Statement is vectorized as
7979 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
7980 depending on bitop1 and bitop2 arity. */
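      /* Summary of the mapping implemented below (boolean elements only take
         the values zero and one/all-ones, so the comparisons reduce to bit
         operations; presentation only, the cases below are authoritative):

           a >  b   ->   a & ~b
           a >= b   ->   a | ~b
           a <  b   ->   b & ~a      (operands swapped)
           a <= b   ->   b | ~a      (operands swapped)
           a != b   ->   a ^ b
           a == b   ->   ~(a ^ b)    (realized by computing a ^ b and
                                      swapping the then/else clauses)  */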
7984 bitop1
= BIT_NOT_EXPR
;
7985 bitop2
= BIT_AND_EXPR
;
7988 bitop1
= BIT_NOT_EXPR
;
7989 bitop2
= BIT_IOR_EXPR
;
7992 bitop1
= BIT_NOT_EXPR
;
7993 bitop2
= BIT_AND_EXPR
;
7994 std::swap (cond_expr0
, cond_expr1
);
7997 bitop1
= BIT_NOT_EXPR
;
7998 bitop2
= BIT_IOR_EXPR
;
7999 std::swap (cond_expr0
, cond_expr1
);
8002 bitop1
= BIT_XOR_EXPR
;
8005 bitop1
= BIT_XOR_EXPR
;
8006 bitop2
= BIT_NOT_EXPR
;
8011 cond_code
= SSA_NAME
;
8016 STMT_VINFO_TYPE (stmt_info
) = condition_vec_info_type
;
8017 if (bitop1
!= NOP_EXPR
)
8019 machine_mode mode
= TYPE_MODE (comp_vectype
);
8022 optab
= optab_for_tree_code (bitop1
, comp_vectype
, optab_default
);
8023 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8026 if (bitop2
!= NOP_EXPR
)
8028 optab
= optab_for_tree_code (bitop2
, comp_vectype
,
8030 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
      if (expand_vec_cond_expr_p (vectype, comp_vectype, cond_code))
	{
	  STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
	  return true;
	}
      return false;
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      if (masked)
		ops.safe_push (cond_expr);
	      else
		{
		  ops.safe_push (cond_expr0);
		  ops.safe_push (cond_expr1);
		}
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      if (!masked)
		vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      if (masked)
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr, stmt,
						    comp_vectype);
		  vect_is_simple_use (cond_expr, stmt_info->vinfo,
				      &gtemp, &dts[0]);
		}
	      else
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr0,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);

		  vec_cond_rhs
		    = vect_get_vec_def_for_operand (cond_expr1,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
		}
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause
		    = vect_get_vec_def_for_operand (then_clause, stmt);
		  vect_is_simple_use (then_clause, loop_vinfo,
				      &gtemp, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause
		    = vect_get_vec_def_for_operand (else_clause, stmt);
		  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs
	    = vect_get_vec_def_for_stmt_copy (dts[0],
					      vec_oprnds0.pop ());
	  if (!masked)
	    vec_cond_rhs
	      = vect_get_vec_def_for_stmt_copy (dts[1],
						vec_oprnds1.pop ());

	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  if (!masked)
	    vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  if (masked)
	    vec_compare = vec_cond_lhs;
	  else
	    {
	      vec_cond_rhs = vec_oprnds1[i];
	      if (bitop1 == NOP_EXPR)
		vec_compare = build2 (cond_code, vec_cmp_type,
				      vec_cond_lhs, vec_cond_rhs);
	      else
		{
		  new_temp = make_ssa_name (vec_cmp_type);
		  if (bitop1 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (new_temp, bitop1,
						    vec_cond_rhs);
		  else
		    new_stmt
		      = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
					     vec_cond_rhs);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (bitop2 == NOP_EXPR)
		    vec_compare = new_temp;
		  else if (bitop2 == BIT_NOT_EXPR)
		    {
		      /* Instead of doing ~x ? y : z do x ? z : y.  */
		      vec_compare = new_temp;
		      std::swap (vec_then_clause, vec_else_clause);
		    }
		  else
		    {
		      vec_compare = make_ssa_name (vec_cmp_type);
		      new_stmt
			= gimple_build_assign (vec_compare, bitop2,
					       vec_cond_lhs, new_temp);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    }
		}
	    }
	  new_temp = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
					  vec_compare, vec_then_clause,
					  vec_else_clause);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, tree reduc_def,
			 slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  unsigned nunits;
  int ncopies;
  enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
			   &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
			   &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
      if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
	return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  /* Can't compare mask and non-mask types.  */
  if (vectype1 && vectype2
      && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
    return false;

  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    {
      if (code == GT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	}
      else if (code == GE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	}
      else if (code == LT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else if (code == LE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else
	{
	  bitop1 = BIT_XOR_EXPR;
	  if (code == EQ_EXPR)
	    bitop2 = BIT_NOT_EXPR;
	}
    }

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
			      dts, ndts, NULL, NULL);
      if (bitop1 == NOP_EXPR)
	return expand_vec_cmp_expr_p (vectype, mask_type, code);
      else
	{
	  machine_mode mode = TYPE_MODE (vectype);
	  optab optab;

	  optab = optab_for_tree_code (bitop1, vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
	    return false;

	  if (bitop2 != NOP_EXPR)
	    {
	      optab = optab_for_tree_code (bitop2, vectype, optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
		return false;
	    }
	  return true;
	}
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
	    }
	}
      else
	{
	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
						     vec_oprnds1.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	{
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  if (bitop1 == NOP_EXPR)
	    {
	      new_stmt = gimple_build_assign (new_temp, code,
					      vec_rhs1, vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    }
	  else
	    {
	      if (bitop1 == BIT_NOT_EXPR)
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
	      else
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
						vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (bitop2 != NOP_EXPR)
		{
		  tree res = make_ssa_name (mask);
		  if (bitop2 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (res, bitop2, new_temp);
		  else
		    new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
						    new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */
static bool
can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, gimple **vec_stmt)
{
  if (slp_node)
    {
      gimple *slp_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
	{
	  stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
					       vec_stmt))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
	   && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
    return false;

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
		   slp_instance node_instance)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");
      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     A pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
			      node_instance))
	return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node, node_instance))
		return false;
	    }
	}
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope
		      || relevance == vect_used_only_live));
      break;

    case vect_induction_def:
      gcc_assert (!bb_vinfo);
      break;

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
	  || vectorizable_induction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	      || vectorizable_conversion (stmt, NULL, NULL, node)
	      || vectorizable_shift (stmt, NULL, NULL, node)
	      || vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_call (stmt, NULL, NULL, node)
	      || vectorizable_store (stmt, NULL, NULL, node)
	      || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	      || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && !can_vectorize_live_stmts (stmt, NULL, node, NULL))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
				     slp_node_instance);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
	     vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
	STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	  {
	    exit_phi = USE_STMT (use_p);
	    STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	  }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;
  int nunits;
  tree vectype;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
    return NULL_TREE;

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else if (!mode_for_vector (inner_mode, size / nbytes).exists (&simd_mode))
    return NULL_TREE;
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  /* NOTE: nunits == 1 is allowed to support single element vector types.  */

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
	     (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));

  return vectype;
}
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO	- the vect info of the loop or basic block that is being
		  vectorized.
   OPERAND	- operand in the loop or bb.
   Output:
   DEF_STMT	- the defining stmt in case OPERAND is an SSA_NAME.
   DT		- the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have
	     the same operation.  One such example is s += a * b, where
	     elements in a and b cannot be reordered.  Here we check if the
	     vector defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple *use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
       || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || (TYPE_VECTOR_SUBPARTS (vectype) / 2
		== TYPE_VECTOR_SUBPARTS (wide_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
		    == TYPE_VECTOR_SUBPARTS (wide_vectype)));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).   */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || (TYPE_VECTOR_SUBPARTS (vectype) * 2
		== TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
		    == TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}