/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref. FORNOW this means that its of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
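/* For illustration (example not from the original sources): given the
   store "a[i_1] = x_2", the function returns true for USE == x_2 (the
   stored value is a real operand) and false for USE == i_1 (used only
   to index the array).  For the load "x_2 = a[i_1]" it returns false:
   the only use, i_1, is an index, and x_2 is a def.  */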
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized: for example, loop
   control and the addressing of vectorized data-refs are handled
   differently and do not need vector code of their own.

   This pass detects such stmts.  */
static bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i;
  gimple *stmt;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");
	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");
	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
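/* Worked example (illustrative; depends on the (i + 1) : i selection
   above): a single-step promotion (PWR = 0) is costed as
   vect_pow2 (1) = 2 vec_promote_demote operations, since widening
   produces a high and a low result vector, while a single-step demotion
   is costed as vect_pow2 (0) = 1, since narrowing packs two inputs into
   one result.  */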
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       vec_load_store_type vls_type, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (vls_type == VLS_STORE_INVARIANT)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
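/* Worked example (illustrative, not from the original sources): an
   interleaved store group of GROUP_SIZE = 4 with NCOPIES = 1 under
   VMAT_CONTIGUOUS_PERMUTE is costed as ceil_log2 (4) * 4 = 8 vec_perm
   operations plus the stores themselves, matching the log2-depth
   interleaving network used to commit the group to memory.  */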
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push sth to a SSA name with initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
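/* Usage sketch (illustrative, not from the original sources): called as
   vect_init_vector (stmt, build_int_cst (integer_type_node, 5),
   vectype, NULL) for a 4 x int vectype, this emits

     cst_1 = { 5, 5, 5, 5 };

   in the loop preheader and returns the SSA name cst_1 (the "cst_"
   prefix comes from the call above; the exact name is made up here).  */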
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}
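/* For illustration (not from the original sources): for a call to the
   sqrt built-in, associated_internal_fn maps the declaration to
   IFN_SQRT; if direct_internal_fn_supported_p reports that the target
   implements IFN_SQRT on the vector types involved, the call is
   vectorized as that internal function.  */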
static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}
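/* E.g. (illustrative, not from the original sources): a load from a[i]
   has step +sizeof (a[0]) and yields 1; a[n - i] has a negative step
   and yields -1; a loop-invariant a[k] has step zero and yields 0.  */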
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* The encoding has a single stepped pattern.  */
  vec_perm_builder sel (nunits, 1, 3);
  for (int i = 0; i < 3; ++i)
    sel.quick_push (nunits - 1 - i);

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}
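/* For illustration (not from the original sources): for a 4-element
   vector the three encoded elements are { 3, 2, 1 }, and the single
   stepped pattern extends to the full reversal { 3, 2, 1, 0 }.  The
   same encoding also describes the reversal when NUNITS is not a
   compile-time constant.  */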
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
			   vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
			   && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (vls_type == VLS_LOAD && loop_vinfo && !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
	  if (multiple_p (nunits, group_size))
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
	  if (overrun_p
	      && gap < (vect_known_alignment_in_bytes (first_dr)
			/ vect_get_scalar_dr_size (first_dr)))
	    overrun_p = false;
	  if (overrun_p && !can_overrun_p)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }
	  *memory_access_type = VMAT_CONTIGUOUS;
	}
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
	 but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
	 would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* An overrun is fine if the trailing elements are smaller than the
	 alignment boundary B.  Every vector access will be a multiple of B
	 and so we are guaranteed to access a non-gap element in the
	 same B-sized block.  */
      if (would_overrun_p
	  && gap < (vect_known_alignment_in_bytes (first_dr)
		    / vect_get_scalar_dr_size (first_dr)))
	would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
	  && (can_overrun_p || !would_overrun_p)
	  && compare_step_with_zero (stmt) > 0)
	{
	  /* First try using LOAD/STORE_LANES.  */
	  if (vls_type == VLS_LOAD
	      ? vect_load_lanes_supported (vectype, group_size)
	      : vect_store_lanes_supported (vectype, group_size))
	    {
	      *memory_access_type = VMAT_LOAD_STORE_LANES;
	      overrun_p = would_overrun_p;
	    }

	  /* If that fails, try using permuting loads.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_grouped_load_supported (vectype, single_element_p,
						 group_size)
		  : vect_grouped_store_supported (vectype, group_size)))
	    {
	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
	      overrun_p = would_overrun_p;
	    }
	}
    }

  if (vls_type != VLS_LOAD && first_stmt == stmt)
    {
      /* STMT is the leader of the group.  Check the operands of all the
	 stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt)
	{
	  gcc_assert (gimple_assign_single_p (next_stmt));
	  tree op = gimple_assign_rhs1 (next_stmt);
	  gimple *def_stmt;
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
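/* For illustration (not from the original sources): an SLP group of
   GROUP_SIZE = 2 strided accesses with an 8-element vector type
   satisfies multiple_p (nunits, group_size) and is classified as
   VMAT_STRIDED_SLP, while GROUP_SIZE = 3 would fall back to
   VMAT_ELEMENTWISE.  */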
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }
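  /* A non-invariant source must be loaded or stored contiguously and then
     reversed in registers, which requires a target permute that swaps the
     vector elements end-for-end.  */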
  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
/* Analyze load or store statement STMT of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (gimple *stmt, tree vectype, bool slp,
		     vec_load_store_type vls_type, unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      gimple *def_stmt;
      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt, vectype, slp, vls_type,
				      memory_access_type))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      *memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }
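  /* At this point a positive step has been classified as VMAT_CONTIGUOUS,
     a negative step has been handed to get_negative_load_store_type, and
     a zero step (every iteration accesses the same location, which is only
     meaningful for loads) has become VMAT_INVARIANT.  */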
  if ((*memory_access_type == VMAT_ELEMENTWISE
       || *memory_access_type == VMAT_STRIDED_SLP)
      && !nunits.is_constant ())
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Not using elementwise accesses due to variable "
			 "vectorization factor.\n");
      return false;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }
  return true;
}
/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  gather_scatter_info gs_info;
  vec_load_store_type vls_type;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = vect_get_num_copies (loop_vinfo, vectype);
  gcc_assert (ncopies >= 1);

  mask = gimple_call_arg (stmt, 2);

  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    return false;

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
      || maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
		   TYPE_VECTOR_SUBPARTS (vectype)))
    return false;

  if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
	return false;
      if (dt == vect_constant_def || dt == vect_external_def)
	vls_type = VLS_STORE_INVARIANT;
      else
	vls_type = VLS_STORE;
    }
  else
    vls_type = VLS_LOAD;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, false, vls_type, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree masktype
	= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "masked gather with integer mask not supported.");
	  return false;
	}
    }
  else if (memory_access_type != VMAT_CONTIGUOUS)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported access type for masked %s.\n",
			 vls_type == VLS_LOAD ? "load" : "store");
      return false;
    }
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
	   || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
					  TYPE_MODE (mask_vectype),
					  vls_type == VLS_LOAD)
	   || (rhs_vectype
	       && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (vls_type == VLS_LOAD)
	vect_model_load_cost (stmt_info, ncopies, memory_access_type,
			      NULL, NULL, NULL);
      else
	vect_model_store_cost (stmt_info, ncopies, memory_access_type,
			       vls_type, NULL, NULL, NULL);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
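
  /* Transform.  The three branches below emit, respectively, a target
     gather/scatter builtin, an IFN_MASK_STORE internal call, or an
     IFN_MASK_LOAD internal call.  In the two IFN_MASK_* cases the second
     call argument is an alignment hint: the least set bit of the known
     misalignment, or the full alignment when the access is aligned.  */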
  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
      tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
      tree mask_perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      poly_uint64 gather_off_nunits
	= TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype)
			   && types_compatible_p (srctype, masktype));

      if (known_eq (nunits, gather_off_nunits))
	modifier = NONE;
      else if (known_eq (nunits * 2, gather_off_nunits))
	{
	  modifier = WIDEN;

	  /* Currently widening gathers and scatters are only supported for
	     fixed-length vectors.  */
	  int count = gather_off_nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < count; ++i)
	    sel.quick_push (i | (count / 2));

	  vec_perm_indices indices (sel, 1, count);
	  perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
						  indices);
	}
      else if (known_eq (nunits, gather_off_nunits * 2))
	{
	  modifier = NARROW;

	  /* Currently narrowing gathers and scatters are only supported for
	     fixed-length vectors.  */
	  int count = nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  sel.quick_grow (count);
	  for (i = 0; i < count; ++i)
	    sel[i] = i < count / 2 ? i : i + count / 2;
	  vec_perm_indices indices (sel, 2, count);
	  perm_mask = vect_gen_perm_mask_checked (vectype, indices);

	  for (i = 0; i < count; ++i)
	    sel[i] = i | (count / 2);
	  indices.new_vector (sel, 2, count);
	  mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices);
	}
      else
	gcc_unreachable ();
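      /* Worked example with count == 8: the WIDEN selector built above is
	 { 4, 5, 6, 7, 4, 5, 6, 7 }, so odd copies reuse the upper half of
	 the offset vector.  The first NARROW selector is
	 { 0, 1, 2, 3, 8, 9, 10, 11 } (the low half of each of the two
	 inputs) and the second is { 4, 5, 6, 7, 4, 5, 6, 7 } (the high
	 half of the mask vector, duplicated).  */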
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      scale = build_int_cst (scaletype, gs_info.scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gs_info.offset, stmt);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
				    TYPE_VECTOR_SUBPARTS (idxtype)));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  if (mask_perm_mask && (j & 1))
	    mask_op = permute_vec_elements (mask_op, mask_op,
					    mask_perm_mask, stmt, gsi);
	  else
	    {
	      if (j == 0)
		vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      else
		{
		  vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
		  vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
		}

	      mask_op = vec_mask;
	      if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
		{
		  gcc_assert
		    (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
			       TYPE_VECTOR_SUBPARTS (masktype)));
		  var = vect_get_new_ssa_name (masktype, vect_simple_var);
		  mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
		  new_stmt
		    = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  mask_op = var;
		}
	    }

	  new_stmt
	    = gimple_build_call (gs_info.decl, 5, mask_op, ptr, op, mask_op,
				 scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
				    TYPE_VECTOR_SUBPARTS (rettype)));
	      op = vect_get_new_ssa_name (rettype, vect_simple_var);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else if (vls_type != VLS_LOAD)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt,
						       mask_vectype);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = DR_TARGET_ALIGNMENT (dr);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? least_bit_hwi (misalign)
				    : align);
	  gcall *call
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  ptr, vec_mask, vec_rhs);
	  gimple_call_set_nothrow (call, true);
	  new_stmt = call;
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt,
						       mask_vectype);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = DR_TARGET_ALIGNMENT (dr);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? least_bit_hwi (misalign)
				    : align);
	  gcall *call
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  ptr, vec_mask);
	  gimple_call_set_lhs (call, make_ssa_name (vec_dest));
	  gimple_call_set_nothrow (call, true);
	  vect_finish_stmt_generation (stmt, call, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = call;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = call;
	  prev_stmt_info = vinfo_for_stmt (call);
	}
    }

  return true;
}
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}.  */

static bool
vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node,
		    tree vectype_in, enum vect_def_type *dt)
{
  tree op, vectype;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  unsigned ncopies;
  unsigned HOST_WIDE_INT nunits, num_bytes;

  op = gimple_call_arg (stmt, 0);
  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
    return false;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  tree char_vectype = get_same_sized_vectype (char_type_node, vectype_in);
  if (! char_vectype)
    return false;

  if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
    return false;

  unsigned word_bytes = num_bytes / nunits;

  /* The encoding uses one stepped pattern for each byte in the word.  */
  vec_perm_builder elts (num_bytes, word_bytes, 3);
  for (unsigned i = 0; i < 3; ++i)
    for (unsigned j = 0; j < word_bytes; ++j)
      elts.quick_push ((i + 1) * word_bytes - j - 1);
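  /* E.g. for a V4SI input viewed as V16QI (num_bytes == 16,
     word_bytes == 4), the three explicitly encoded patterns are
     { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8 }; the builder extrapolates
     the remaining bytes, so each 4-byte word is reversed in place.  */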
  vec_perm_indices indices (elts, 1, num_bytes);
  if (!can_vec_perm_const_p (TYPE_MODE (char_vectype), indices))
    return false;

  if (! vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_bswap ==="
			 "\n");
      if (! PURE_SLP_STMT (stmt_info))
	{
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 1, vector_stmt, stmt_info, 0, vect_prologue);
	  add_stmt_cost (stmt_info->vinfo->target_cost_data,
			 ncopies, vec_perm, stmt_info, 0, vect_body);
	}
      return true;
    }

  tree bswap_vconst = vec_perm_indices_to_tree (char_vectype, indices);

  /* Transform.  */
  vec<tree> vec_oprnds = vNULL;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  for (unsigned j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      unsigned i;
      tree vop;
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  tree tem = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       char_vectype, vop));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tree tem2 = make_ssa_name (char_vectype);
	  new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
					  tem, tem, bswap_vconst);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  tem = make_ssa_name (vectype);
	  new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
						       vectype, tem2));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec<tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 3;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs, type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (known_eq (nunits_in * 2, nunits_out))
    modifier = NARROW;
  else if (known_eq (nunits_out, nunits_in))
    modifier = NONE;
  else if (known_eq (nunits_out * 2, nunits_in))
    modifier = WIDEN;
  else
    return false;
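  /* For example, a call taking V2DI operands and producing a V4SI result
     has nunits_in == 2 and nunits_out == 4, so modifier becomes NARROW:
     two vectorized calls are combined into each output vector.  */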
  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  tree_code convert_code = ERROR_MARK;
  if (cfn != CFN_LAST
      && (modifier == NONE
	  || (modifier == NARROW
	      && simple_integer_narrowing (vectype_out, vectype_in,
					   &convert_code))))
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else if (callee)
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else if (modifier == NONE
	       && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
		   || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
		   || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
	return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
				   vectype_in, dt);
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW && ifn == IFN_LAST)
    ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
	add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
		       vec_promote_demote, stmt_info, 0, vect_body);

      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  if (modifier == NONE || ifn != IFN_LAST)
    {
      tree prev_res = NULL_TREE;
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  if (modifier == NARROW)
		    {
		      tree half_res = make_ssa_name (vectype_in);
		      gcall *call
			= gimple_build_call_internal_vec (ifn, vargs);
		      gimple_call_set_lhs (call, half_res);
		      gimple_call_set_nothrow (call, true);
		      new_stmt = call;
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      if ((i & 1) == 0)
			{
			  prev_res = half_res;
			  continue;
			}
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, convert_code,
						      prev_res, half_res);
		    }
		  else
		    {
		      gcall *call;
		      if (ifn != IFN_LAST)
			call = gimple_build_call_internal_vec (ifn, vargs);
		      else
			call = gimple_build_call_vec (fndecl, vargs);
		      new_temp = make_ssa_name (vec_dest, call);
		      gimple_call_set_lhs (call, new_temp);
		      gimple_call_set_nothrow (call, true);
		      new_stmt = call;
		    }
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree cst = build_index_vector (vectype_out, j * nunits_out, 1);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else if (modifier == NARROW)
	    {
	      tree half_res = make_ssa_name (vectype_in);
	      gcall *call = gimple_build_call_internal_vec (ifn, vargs);
	      gimple_call_set_lhs (call, half_res);
	      gimple_call_set_nothrow (call, true);
	      new_stmt = call;
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if ((j & 1) == 0)
		{
		  prev_res = half_res;
		  continue;
		}
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, convert_code,
					      prev_res, half_res);
	    }
	  else
	    {
	      gcall *call;
	      if (ifn != IFN_LAST)
		call = gimple_build_call_internal_vec (ifn, vargs);
	      else
		call = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (call, new_temp);
	      gimple_call_set_nothrow (call, true);
	      new_stmt = call;
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == (modifier == NARROW ? 1 : 0))
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else if (modifier == NARROW)
    {
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  gcall *call;
		  if (ifn != IFN_LAST)
		    call = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    call = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, call);
		  gimple_call_set_lhs (call, new_temp);
		  gimple_call_set_nothrow (call, true);
		  new_stmt = call;
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  HOST_WIDE_INT linear_step;
  enum vect_def_type dt;
  unsigned int align;
  bool simd_lane_linear;
};
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
    }
}
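
/* For instance, given an address computed as "p = base + (sizetype) lane
   * 4" where LANE is the result of an IFN_GOMP_SIMD_LANE call, the walk
   above ends at that call and records BASE together with a linear step
   of 4 in ARGINFO.  */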
/* Return the number of elements in vector type VECTYPE, which is associated
   with a SIMD clone.  At present these vectors always have a constant
   length.  */

static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  auto_vec<simd_call_arg_info> arginfo;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts = NULL;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node)
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.reserve (nargs, true);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned HOST_WIDE_INT vf;
  if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not considering SIMD clones; not yet supported"
			 " for variable-width vectors.\n");
      return false;
    }

  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen > vf
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen < vf)
	  this_badness += (exact_log2 (vf)
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }
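  /* Example of the scoring above: with vf == 8, a usable clone with
     simdlen 8 scores 0 while a simdlen-4 clone scores (3 - 2) * 1024,
     so the widest matching clone wins unless other penalties apply.  */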
  if (bestn == NULL)
    return false;

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (simd_clone_subparts (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  return false;
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = vf / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    return false;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if ((bestn->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	    || (bestn->simdclone->args[i].arg_type
		== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      return true;
    }
  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / simd_clone_subparts (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (simd_clone_subparts (atype)
		      < simd_clone_subparts (arginfo[i].vectype))
		    {
		      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (simd_clone_subparts (arginfo[i].vectype)
			   / simd_clone_subparts (atype));
		      gcc_assert ((k & (k - 1)) == 0);
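		      /* E.g. if the clone's parameter type holds 4 elements
			 while the loop works on 8-element vectors (k == 2),
			 each input vector is split into two halves with
			 BIT_FIELD_REFs and passed as two arguments.  */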
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  bitsize_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (simd_clone_subparts (atype)
			   / simd_clone_subparts (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts = NULL;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}
      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (simd_clone_subparts (vectype)
		   == simd_clone_subparts (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (simd_clone_subparts (vectype) < nunits)
	    {
	      unsigned int k, l;
	      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
	      k = nunits / simd_clone_subparts (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t), l * bytes));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				bitsize_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (simd_clone_subparts (vectype) > nunits)
	    {
	      unsigned int k = (simd_clone_subparts (vectype)
				/ simd_clone_subparts (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / simd_clone_subparts (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber),
					   gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
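
/* E.g. widening a V8HI operation produces two V4SI halves; the caller
   invokes the function above twice, once with the low-part code (or decl)
   and once with the high-part one, to cover all eight lanes.  */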
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
3938 /* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
3939 For multi-step conversions store the resulting vectors and call the function
static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
                                       int multi_step_cvt, gimple *stmt,
                                       vec<tree> vec_dsts,
                                       gimple_stmt_iterator *gsi,
                                       slp_tree slp_node, enum tree_code code,
                                       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
        /* Store the resulting vector for next recursive call.  */
        (*vec_oprnds)[i/2] = new_tmp;
      else
        {
          /* This is the last step of the conversion sequence.  Store the
             vectors in SLP_NODE or in vector info of the scalar statement
             (or in STMT_VINFO_RELATED_STMT chain).  */
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
          else
            {
              if (!*prev_stmt_info)
                STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

              *prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
         previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
                                             stmt, vec_dsts, gsi, slp_node,
                                             VEC_PACK_TRUNC_EXPR,
                                             prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
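/* Illustrative sketch (added commentary): demoting four int vectors to
   one char vector takes two levels, pairing inputs at each level:

       tmp0 = VEC_PACK_TRUNC_EXPR <v0, v1>;      // V4SI,V4SI -> V8HI
       tmp1 = VEC_PACK_TRUNC_EXPR <v2, v3>;      // V4SI,V4SI -> V8HI
       res  = VEC_PACK_TRUNC_EXPR <tmp0, tmp1>;  // V8HI,V8HI -> V16QI

   the first level uses CODE; the recursive levels use VEC_PACK_TRUNC_EXPR.  */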
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
                                        vec<tree> *vec_oprnds1,
                                        gimple *stmt, tree vec_dest,
                                        gimple_stmt_iterator *gsi,
                                        enum tree_code code1,
                                        enum tree_code code2, tree decl1,
                                        tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
        vop1 = (*vec_oprnds1)[i];
      else
        vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
                                                 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
        {
          new_tmp1 = gimple_call_lhs (new_stmt1);
          new_tmp2 = gimple_call_lhs (new_stmt2);
        }
      else
        {
          new_tmp1 = gimple_assign_lhs (new_stmt1);
          new_tmp2 = gimple_assign_lhs (new_stmt2);
        }

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
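/* Illustrative note (added commentary): each pass through this function
   doubles the number of defs in VEC_OPRNDS0, so a two-step promotion such
   as char -> int proceeds V16QI -> {V8HI_lo, V8HI_hi} on the first pass
   and then to {V4SI_0, V4SI_1, V4SI_2, V4SI_3} on the second.  */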
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
            && INTEGRAL_TYPE_P (rhs_type))
           || (SCALAR_FLOAT_TYPE_P (lhs_type)
               && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
           && !type_has_mode_precision_p (lhs_type))
          || (INTEGRAL_TYPE_P (rhs_type)
              && !type_has_mode_precision_p (rhs_type))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision unsupported."
                         "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
         OP1.  */
      if (CONSTANT_CLASS_P (op0))
        ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
        ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "can't convert between boolean and non "
                           "boolean vectors");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (known_eq (nunits_out, nunits_in))
    modifier = NONE;
  else if (multiple_p (nunits_out, nunits_in))
    modifier = NARROW;
  else
    {
      gcc_checking_assert (multiple_p (nunits_in, nunits_out));
      modifier = WIDEN;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
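  /* Illustrative examples (added commentary): with V4SI inputs, int ->
     double is a WIDEN conversion (V2DF holds fewer elements), double ->
     int is NARROW, and int <-> float with equal unit counts is NONE.
     With VF == 8 and V4SI input vectors the NONE case needs
     NCOPIES == 2.  */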
  bool found_mode = false;
  scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
  scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
  opt_scalar_mode rhs_mode_iter;

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
        return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
                                         &decl1, &code1))
        break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
                                          &code1, &code2, &multi_step_cvt,
                                          &interm_types))
        {
          /* Binary widening operation can only be supported directly by the
             architecture.  */
          gcc_assert (!(multi_step_cvt && op_type == binary_op));
          break;
        }

      if (code != FLOAT_EXPR
          || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
        goto unsupported;

      fltsz = GET_MODE_SIZE (lhs_mode);
      FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
        {
          rhs_mode = rhs_mode_iter.require ();
          if (GET_MODE_SIZE (rhs_mode) > fltsz)
            break;

          cvt_type
            = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
          cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
          if (cvt_type == NULL_TREE)
            goto unsupported;

          if (GET_MODE_SIZE (rhs_mode) == fltsz)
            {
              if (!supportable_convert_operation (code, vectype_out,
                                                  cvt_type, &decl1, &codecvt1))
                goto unsupported;
            }
          else if (!supportable_widening_operation (code, stmt, vectype_out,
                                                    cvt_type, &codecvt1,
                                                    &codecvt2, &multi_step_cvt,
                                                    &interm_types))
            continue;
          else
            gcc_assert (multi_step_cvt == 0);

          if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
                                              vectype_in, &code1, &code2,
                                              &multi_step_cvt, &interm_types))
            {
              found_mode = true;
              break;
            }
        }

      if (!found_mode)
        goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
        codecvt2 = ERROR_MARK;
      else
        {
          multi_step_cvt++;
          interm_types.safe_push (cvt_type);
          cvt_type = NULL_TREE;
        }
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
        break;

      if (code != FIX_TRUNC_EXPR
          || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
        goto unsupported;

      cvt_type
        = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
        goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
                                          &decl1, &codecvt1))
        goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
                                           &code1, &multi_step_cvt,
                                           &interm_types))
        break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
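  /* Illustrative example (added commentary, an assumption about a typical
     target): for FLOAT_EXPR from short to double there is usually no
     direct optab, so the WIDEN loop above walks the 2x-wider integer
     modes: the shorts are first widened to int (NOP_EXPR halves), and
     int -> double is then handled by supportable_convert_operation,
     leaving CVT_TYPE == int and one extra multi-step level.  */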
  if (!vec_stmt)                /* transformation not required.  */
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
        {
          STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
          vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
        }
      else if (modifier == NARROW)
        {
          STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
        }
      else
        {
          STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
          vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
        }
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
        op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
        op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  auto_vec<tree> vec_dsts (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
                                          (cvt_type && modifier == WIDEN)
                                          ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
           interm_types.iterate (i, &intermediate_type); i--)
        {
          vec_dest = vect_create_destination_var (scalar_dest,
                                                  intermediate_type);
          vec_dsts.quick_push (vec_dest);
        }
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
                                            modifier == WIDEN
                                            ? vectype_out : cvt_type);
  if (!slp_node)
    {
      if (modifier == WIDEN)
        {
          vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
          if (op_type == binary_op)
            vec_oprnds1.create (1);
        }
      else if (modifier == NARROW)
        vec_oprnds0.create (
                   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
            vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
            {
              /* Arguments are ready, create the new vector stmt.  */
              if (code1 == CALL_EXPR)
                {
                  new_stmt = gimple_build_call (decl1, 1, vop0);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                }
              else
                {
                  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
                  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                }

              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (slp_node)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              else
                {
                  if (!prev_stmt_info)
                    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
                  else
                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
                  prev_stmt_info = vinfo_for_stmt (new_stmt);
                }
            }
        }
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (j == 0)
            {
              if (slp_node)
                {
                  if (code == WIDEN_LSHIFT_EXPR)
                    {
                      unsigned int k;

                      vec_oprnd1 = op1;
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);

                      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
                                         NULL, slp_node);
                    }
                  else
                    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
                                       &vec_oprnds1, slp_node);
                }
              else
                {
                  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
                  vec_oprnds0.quick_push (vec_oprnd0);
                  if (op_type == binary_op)
                    {
                      if (code == WIDEN_LSHIFT_EXPR)
                        vec_oprnd1 = op1;
                      else
                        vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
                      vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }
          else
            {
              vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
              vec_oprnds0.truncate (0);
              vec_oprnds0.quick_push (vec_oprnd0);
              if (op_type == binary_op)
                {
                  if (code == WIDEN_LSHIFT_EXPR)
                    vec_oprnd1 = op1;
                  else
                    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
                                                                 vec_oprnd1);
                  vec_oprnds1.truncate (0);
                  vec_oprnds1.quick_push (vec_oprnd1);
                }
            }

          /* Arguments are ready.  Create the new vector stmts.  */
          for (i = multi_step_cvt; i >= 0; i--)
            {
              tree this_dest = vec_dsts[i];
              enum tree_code c1 = code1, c2 = code2;
              if (i == 0 && codecvt2 != ERROR_MARK)
                {
                  c1 = codecvt1;
                  c2 = codecvt2;
                }
              vect_create_vectorized_promotion_stmts (&vec_oprnds0,
                                                      &vec_oprnds1,
                                                      stmt, this_dest, gsi,
                                                      c1, c2, decl1, decl2,
                                                      op_type);
            }

          FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
            {
              if (cvt_type)
                {
                  if (codecvt1 == CALL_EXPR)
                    {
                      new_stmt = gimple_build_call (decl1, 1, vop0);
                      new_temp = make_ssa_name (vec_dest, new_stmt);
                      gimple_call_set_lhs (new_stmt, new_temp);
                    }
                  else
                    {
                      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
                      new_temp = make_ssa_name (vec_dest);
                      new_stmt = gimple_build_assign (new_temp, codecvt1,
                                                      vop0);
                    }

                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                }
              else
                new_stmt = SSA_NAME_DEF_STMT (vop0);

              if (slp_node)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              else
                {
                  if (!prev_stmt_info)
                    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
                  else
                    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
                  prev_stmt_info = vinfo_for_stmt (new_stmt);
                }
            }
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
         of elements that we can fit in a vectype (nunits), we have to
         generate more than one vector stmt - i.e - we need to "unroll"
         the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
        {
          /* Handle uses.  */
          if (slp_node)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
          else
            {
              vec_oprnds0.truncate (0);
              vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
                                        vect_pow2 (multi_step_cvt) - 1);
            }

          /* Arguments are ready.  Create the new vector stmts.  */
          if (cvt_type)
            FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
              {
                if (codecvt1 == CALL_EXPR)
                  {
                    new_stmt = gimple_build_call (decl1, 1, vop0);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_call_set_lhs (new_stmt, new_temp);
                  }
                else
                  {
                    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
                    new_temp = make_ssa_name (vec_dest);
                    new_stmt = gimple_build_assign (new_temp, codecvt1,
                                                    vop0);
                  }

                vect_finish_stmt_generation (stmt, new_stmt, gsi);
                vec_oprnds0[i] = new_temp;
              }

          vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
                                                 stmt, vec_dsts, gsi,
                                                 slp_node, code1,
                                                 &prev_stmt_info);
        }

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[1] = {vect_unknown_def_type};
  int ndts = 1;
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
          || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
          || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
                       GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
          || !type_has_mode_precision_p (TREE_TYPE (op)))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
            > TYPE_PRECISION (TREE_TYPE (op)))
           && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
         a simple assignment in case their vectypes are same
         boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
          || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision "
                         "unsupported.\n");
      return false;
    }

  if (!vec_stmt)                /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
        {
          if (CONVERT_EXPR_CODE_P (code)
              || code == VIEW_CONVERT_EXPR)
            vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
          new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
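/* Illustrative note (added commentary): this path covers plain SSA copies
   and bit-preserving conversions; e.g. a same-size punning copy such as

       vx.4_5 = VIEW_CONVERT_EXPR<vector(4) float>(vx_4);

   becomes a single vector assignment per copy.  */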
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */
bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{

  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
          || (optab_handler (optab, TYPE_MODE (vectype))
              == CODE_FOR_nothing))
        return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
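/* Example caller (hypothetical, for illustration only): pattern
   recognition can ask

       if (vect_supportable_shift (RSHIFT_EXPR, short_integer_type_node))
         ...

   to verify that some vector right shift on short exists before
   committing to a pattern that needs one.  */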
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
        || code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (maybe_ne (nunits_out, nunits_in))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
           || dt[1] == vect_external_def
           || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
         in loops if it is a constant or invariant, it is always
         a scalar shift.  */
      if (slp_node)
        {
          vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
          gimple *slpstmt;

          FOR_EACH_VEC_ELT (stmts, k, slpstmt)
            if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
              scalar_shift_arg = false;
        }

      /* If the shift amount is computed by a pattern stmt we cannot
         use the scalar amount directly thus give up and use a vector
         shift.  */
      if (dt[1] == vect_internal_def)
        {
          gimple *def = SSA_NAME_DEF_STMT (op1);
          if (is_pattern_stmt_p (vinfo_for_stmt (def)))
            scalar_shift_arg = false;
        }
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
        op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
          || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
          return false;
        }
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
                  != CODE_FOR_nothing))
            {
              scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
                 dealing with vectors of long long/long/short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
              else if (!useless_type_conversion_p (TREE_TYPE (vectype),
                                                   TREE_TYPE (op1)))
                {
                  if (slp_node
                      && TYPE_MODE (TREE_TYPE (vectype))
                         != TYPE_MODE (TREE_TYPE (op1)))
                    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
                                         vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
                      return false;
                    }
                  if (vec_stmt && !slp_node)
                    {
                      op1 = fold_convert (TREE_TYPE (vectype), op1);
                      op1 = vect_init_vector (stmt, op1,
                                              TREE_TYPE (vectype), NULL);
                    }
                }
            }
        }
    }
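  /* Summary of the choice above (added commentary): for an invariant
     amount like `x[i] << 3' the vector-shift-by-scalar optab
     (optab_scalar) is preferred; for `x[i] << y[i]' the amount is itself
     a vector and only the vector/vector optab applies, with OP1 possibly
     converted so its element mode matches that of the shifted vector.  */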
  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
          || (!vec_stmt
              && !vect_worthwhile_without_simd_p (vinfo, code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!vec_stmt
      && !VECTOR_MODE_P (TYPE_MODE (vectype))
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt)                /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");
= vect_create_destination_var (scalar_dest
, vectype
);
5150 prev_stmt_info
= NULL
;
5151 for (j
= 0; j
< ncopies
; j
++)
5156 if (scalar_shift_arg
)
5158 /* Vector shl and shr insn patterns can be defined with scalar
5159 operand 2 (shift operand). In this case, use constant or loop
5160 invariant op1 directly, without extending it to vector mode
5162 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
5163 if (!VECTOR_MODE_P (optab_op2_mode
))
5165 if (dump_enabled_p ())
5166 dump_printf_loc (MSG_NOTE
, vect_location
,
5167 "operand 1 using scalar mode.\n");
5169 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
5170 vec_oprnds1
.quick_push (vec_oprnd1
);
5173 /* Store vec_oprnd1 for every vector stmt to be created
5174 for SLP_NODE. We check during the analysis that all
5175 the shift arguments are the same.
5176 TODO: Allow different constants for different vector
5177 stmts generated for an SLP instance. */
5178 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
5179 vec_oprnds1
.quick_push (vec_oprnd1
);
5184 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5185 (a special case for certain kind of vector shifts); otherwise,
5186 operand 1 should be of a vector type (the usual case). */
5188 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
5191 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5195 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
5197 /* Arguments are ready. Create the new vector stmt. */
5198 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5200 vop1
= vec_oprnds1
[i
];
5201 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
5202 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5203 gimple_assign_set_lhs (new_stmt
, new_temp
);
5204 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5206 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5213 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5215 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5216 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5219 vec_oprnds0
.release ();
5220 vec_oprnds1
.release ();
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, orig_code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 3;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  orig_code = code = gimple_assign_rhs_code (stmt);

  /* For pointer addition and subtraction, we should use the normal
     plus and minus for the vector operation.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;
  if (code == POINTER_DIFF_EXPR)
    code = MINUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
         invariant value (don't know whether it is a vector
         of booleans or vector of integers).  We use output
         vectype because operations on boolean don't change
         type.  */
      if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
        {
          if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "not supported operation on bool value.\n");
              return false;
            }
          vectype = vectype_out;
        }
      else
        vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (maybe_ne (nunits_out, nunits_in))
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
          return false;
        }
      target_support_p = (optab_handler (optab, vec_mode)
                          != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
          || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt)                /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
     vectors with unsigned elements, but the result is signed.  So, we
     need to compute the MINUS_EXPR into vectype temporary and
     VIEW_CONVERT_EXPR it into the final vectype_out result.  */
  tree vec_cvt_dest = NULL_TREE;
  if (orig_code == POINTER_DIFF_EXPR)
    vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (op_type == binary_op || op_type == ternary_op)
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
          else
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node);
          if (op_type == ternary_op)
            vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
                               slp_node);
        }
      else
        {
          vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
          if (op_type == ternary_op)
            {
              tree vec_oprnd = vec_oprnds2.pop ();
              vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
                                                                  vec_oprnd));
            }
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = ((op_type == binary_op || op_type == ternary_op)
                  ? vec_oprnds1[i] : NULL_TREE);
          vop2 = ((op_type == ternary_op)
                  ? vec_oprnds2[i] : NULL_TREE);
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (vec_cvt_dest)
            {
              new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
              new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
                                              new_temp);
              new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
            }
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment.  */

static void
ensure_base_align (struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;

      if (decl_in_symtab_p (base_decl))
        symtab_node::get (base_decl)->increase_alignment (align_base_to);
      else
        {
          SET_DECL_ALIGN (base_decl, align_base_to);
          DECL_USER_ALIGN (base_decl) = 1;
        }
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
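/* Illustrative note (added commentary): for a data-ref into a global
   array whose target alignment is, say, 16 bytes, the symtab path above
   raises the symbol's alignment; for locals the DECL_ALIGN is set
   directly and marked user-specified so later passes keep it.  */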
/* Function get_group_alias_ptr_type.

   Return the alias type for the group starting at FIRST_STMT.  */

static tree
get_group_alias_ptr_type (gimple *first_stmt)
{
  struct data_reference *first_dr, *next_dr;
  gimple *next_stmt;

  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
  while (next_stmt)
    {
      next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
      if (get_alias_set (DR_REF (first_dr))
          != get_alias_set (DR_REF (next_dr)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "conflicting alias set types.\n");
          return ptr_type_node;
        }
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  return reference_alias_ptr_type (DR_REF (first_dr));
}
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt;
  bool grouped_store;
  unsigned int group_size, i;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  gather_scatter_info gs_info;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;
  gimple *new_stmt;
  poly_uint64 vf;
  vec_load_store_type vls_type;
  tree ref_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  /* Cannot have hybrid store SLP -- that would mean storing to the
     same location twice.  */
  gcc_assert (slp == PURE_SLP_STMT (stmt_info));

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }
  op = gimple_assign_rhs1 (stmt);

  /* In the case this is a store from a constant make sure
     native_encode_expr can handle it.  */
  if (CONSTANT_CLASS_P (op) && native_encode_expr (op, NULL, 64) == 0)
    return false;

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    vls_type = VLS_STORE_INVARIANT;
  else
    vls_type = VLS_STORE;

  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    return false;

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, slp, vls_type, ncopies,
                            &memory_access_type, &gs_info))
    return false;

  if (!vec_stmt)                /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_store_cost (stmt_info, ncopies, memory_access_type,
                               vls_type, NULL, NULL, NULL);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  /* Transform.  */

  ensure_base_align (dr);
  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      poly_uint64 scatter_off_nunits
        = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      if (known_eq (nunits, scatter_off_nunits))
        modifier = NONE;
      else if (known_eq (nunits * 2, scatter_off_nunits))
        {
          modifier = WIDEN;

          /* Currently gathers and scatters are only supported for
             fixed-length vectors.  */
          unsigned int count = scatter_off_nunits.to_constant ();
          vec_perm_builder sel (count, count, 1);
          for (i = 0; i < (unsigned int) count; ++i)
            sel.quick_push (i | (count / 2));

          vec_perm_indices indices (sel, 1, count);
          perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
                                                  indices);
          gcc_assert (perm_mask != NULL_TREE);
        }
      else if (known_eq (nunits, scatter_off_nunits * 2))
        {
          modifier = NARROW;

          /* Currently gathers and scatters are only supported for
             fixed-length vectors.  */
          unsigned int count = nunits.to_constant ();
          vec_perm_builder sel (count, count, 1);
          for (i = 0; i < (unsigned int) count; ++i)
            sel.quick_push (i | (count / 2));

          vec_perm_indices indices (sel, 2, count);
          perm_mask = vect_gen_perm_mask_checked (vectype, indices);
          gcc_assert (perm_mask != NULL_TREE);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
                           && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional scatter stores,
         so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, gs_info.scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (j == 0)
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
              op = vec_oprnd0
                = vect_get_vec_def_for_operand (gs_info.offset, stmt);
            }
          else if (modifier != NONE && (j & 1))
            {
              if (modifier == WIDEN)
                {
                  src = vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (scatter_src_dt,
                                                      vec_oprnd1);
                  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
                                             stmt, gsi);
                }
              else if (modifier == NARROW)
                {
                  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
                                              stmt, gsi);
                  op = vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
                                                      vec_oprnd0);
                }
              else
                gcc_unreachable ();
            }
          else
            {
              src = vec_oprnd1
                = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
              op = vec_oprnd0
                = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
                                                  vec_oprnd0);
            }

          if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
            {
              gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
                                    TYPE_VECTOR_SUBPARTS (srctype)));
              var = vect_get_new_ssa_name (srctype, vect_simple_var);
              src = build1 (VIEW_CONVERT_EXPR, srctype, src);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              src = var;
            }

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
                                    TYPE_VECTOR_SUBPARTS (idxtype)));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
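  /* Illustrative example for the permutations above (added commentary):
     with V2DF source data and V4SI offsets, MODIFIER == WIDEN and every
     odd copy scatters using the high half of the offset vector; with
     V8SF data and V4DI offsets, MODIFIER == NARROW, NCOPIES is doubled,
     and every odd copy scatters the high half of the data vector.  */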
  grouped_store = STMT_VINFO_GROUPED_ACCESS (stmt_info);
  if (grouped_store)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
          < GROUP_SIZE (vinfo_for_stmt (first_stmt))
          && !slp)
        {
          *vec_stmt = NULL;
          return true;
        }

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt))
                      == first_stmt);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
        vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr));
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);
6027 if (memory_access_type
== VMAT_ELEMENTWISE
6028 || memory_access_type
== VMAT_STRIDED_SLP
)
6030 gimple_stmt_iterator incr_gsi
;
6036 gimple_seq stmts
= NULL
;
6037 tree stride_base
, stride_step
, alias_off
;
6040 /* Checked by get_load_store_type. */
6041 unsigned int const_nunits
= nunits
.to_constant ();
6043 gcc_assert (!nested_in_vect_loop_p (loop
, stmt
));
6046 = fold_build_pointer_plus
6047 (unshare_expr (DR_BASE_ADDRESS (first_dr
)),
6048 size_binop (PLUS_EXPR
,
6049 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr
))),
6050 convert_to_ptrofftype (DR_INIT (first_dr
))));
6051 stride_step
= fold_convert (sizetype
, unshare_expr (DR_STEP (first_dr
)));
6053 /* For a store with loop-invariant (but other than power-of-2)
6054 stride (i.e. not a grouped access) like so:
6056 for (i = 0; i < n; i += stride)
6059 we generate a new induction variable and new stores from
6060 the components of the (vectorized) rhs:
6062 for (j = 0; ; j += VF*stride)
6067 array[j + stride] = tmp2;
      unsigned nstores = const_nunits;
      unsigned lnel = 1;
      tree ltype = elem_type;
      tree lvectype = vectype;
      if (slp)
        {
          if (group_size < const_nunits
              && const_nunits % group_size == 0)
            {
              nstores = const_nunits / group_size;
              lnel = group_size;
              ltype = build_vector_type (elem_type, group_size);

              /* First check if vec_extract optab doesn't support extraction
                 of vector elts directly.  */
              scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
              machine_mode vmode;
              if (!mode_for_vector (elmode, group_size).exists (&vmode)
                  || !VECTOR_MODE_P (vmode)
                  || (convert_optab_handler (vec_extract_optab,
                                             TYPE_MODE (vectype), vmode)
                      == CODE_FOR_nothing))
                {
                  /* Try to avoid emitting an extract of vector elements
                     by performing the extracts using an integer type of the
                     same size, extracting from a vector of those and then
                     re-interpreting it as the original vector type if
                     supported.  */
                  unsigned lsize
                    = group_size * GET_MODE_BITSIZE (elmode);
                  elmode = int_mode_for_size (lsize, 0).require ();
                  unsigned int lnunits = const_nunits / group_size;
                  /* If we can't construct such a vector fall back to
                     element extracts from the original vector type and
                     element size stores.  */
                  if (mode_for_vector (elmode, lnunits).exists (&vmode)
                      && VECTOR_MODE_P (vmode)
                      && (convert_optab_handler (vec_extract_optab,
                                                 vmode, elmode)
                          != CODE_FOR_nothing))
                    {
                      nstores = lnunits;
                      lnel = group_size;
                      ltype = build_nonstandard_integer_type (lsize, 1);
                      lvectype = build_vector_type (ltype, nstores);
                    }
                  /* Else fall back to vector extraction anyway.
                     Fewer stores are more important than avoiding spilling
                     of the vector we extract from.  Compared to the
                     construction case in vectorizable_load no store-forwarding
                     issue exists here for reasonable archs.  */
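                  /* Editor's illustration (a sketch, not in the original
                     sources): for a group of two floats stored per iteration
                     out of a V4SF vector, the retry above uses a 64-bit
                     integer element, i.e. it extracts two DImode values from
                     a V2DI view of the vector and stores those, halving the
                     number of stores:

                       vec' = VIEW_CONVERT_EXPR<vector(2) long>(vec);
                       *p       = BIT_FIELD_REF <vec', 64, 0>;
                       *(p + s) = BIT_FIELD_REF <vec', 64, 64>;  */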
                }
            }
          else if (group_size >= const_nunits
                   && group_size % const_nunits == 0)
            {
              nstores = 1;
              lnel = const_nunits;
              ltype = vectype;
              lvectype = vectype;
            }
          ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
        }

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
                            build_int_cst (TREE_TYPE (ivstep), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (ref_type, 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
        {
          running_off = offvar;
          if (g)
            {
              tree size = TYPE_SIZE_UNIT (ltype);
              tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
                                      size);
              tree newoff = copy_ssa_name (running_off, NULL);
              incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                          running_off, pos);
              vect_finish_stmt_generation (stmt, incr, gsi);
              running_off = newoff;
            }
          unsigned int group_el = 0;
          unsigned HOST_WIDE_INT
            elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
          for (j = 0; j < ncopies; j++)
            {
              /* We've set op and dt above, from gimple_assign_rhs1(stmt),
                 and first_stmt == stmt.  */
              if (j == 0)
                {
                  if (slp)
                    {
                      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
                                         slp_node);
                      vec_oprnd = vec_oprnds[0];
                    }
                  else
                    {
                      gcc_assert (gimple_assign_single_p (next_stmt));
                      op = gimple_assign_rhs1 (next_stmt);
                      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                    }
                }
              else
                {
                  if (slp)
                    vec_oprnd = vec_oprnds[j];
                  else
                    {
                      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
                      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
                    }
                }
              /* Pun the vector to extract from if necessary.  */
              if (lvectype != vectype)
                {
                  tree tem = make_ssa_name (lvectype);
                  gimple *pun
                    = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
                                                        lvectype, vec_oprnd));
                  vect_finish_stmt_generation (stmt, pun, gsi);
                  vec_oprnd = tem;
                }
              for (i = 0; i < nstores; i++)
                {
                  tree newref, newoff;
                  gimple *incr, *assign;
                  tree size = TYPE_SIZE (ltype);
                  /* Extract the i'th component.  */
                  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
                                          bitsize_int (i), size);
                  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
                                           size, pos);

                  elem = force_gimple_operand_gsi (gsi, elem, true,
                                                   NULL_TREE, true,
                                                   GSI_SAME_STMT);

                  tree this_off = build_int_cst (TREE_TYPE (alias_off),
                                                 group_el * elsz);
                  newref = build2 (MEM_REF, ltype,
                                   running_off, this_off);

                  /* And store it to *running_off.  */
                  assign = gimple_build_assign (newref, elem);
                  vect_finish_stmt_generation (stmt, assign, gsi);

                  group_el += lnel;
                  if (! slp
                      || group_el == group_size)
                    {
                      newoff = copy_ssa_name (running_off, NULL);
                      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
                      vect_finish_stmt_generation (stmt, incr, gsi);

                      running_off = newoff;
                      group_el = 0;
                    }
                  if (g == group_size - 1
                      && !slp)
                    {
                      if (j == 0 && i == 0)
                        STMT_VINFO_VEC_STMT (stmt_info)
                            = *vec_stmt = assign;
                      else
                        STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
                      prev_stmt_info = vinfo_for_stmt (assign);
                    }
                }
            }
          next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
          if (slp)
            break;
        }

      vec_oprnds.release ();
      return true;
    }
  auto_vec<tree> dr_chain (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  if (memory_access_type == VMAT_CONTIGUOUS_DOWN
      || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */
  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
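  /* Editor's illustration (a sketch, not in the original sources): for the
     simplest interleaved case, group_size = 2 with V4SI vectors,

        a[2*i]   = x;
        a[2*i+1] = y;

     the two rhs vectors vx = {x0,x1,x2,x3} and vy = {y0,y1,y2,y3} are
     interleaved by vect_permute_store_chain into

        vlo = VEC_PERM_EXPR <vx, vy, {0, 4, 1, 5}>;   i.e. {x0,y0,x1,y1}
        vhi = VEC_PERM_EXPR <vx, vy, {2, 6, 3, 7}>;   i.e. {x2,y2,x3,y3}

     which are then stored contiguously.  */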
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
        {
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
              /* For interleaved stores we collect vectorized defs for all the
                 stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
                 used as an input to vect_permute_store_chain(), and OPRNDS as
                 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

                 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
                 OPRNDS are of size 1.  */
              next_stmt = first_stmt;
              for (i = 0; i < group_size; i++)
                {
                  /* Since gaps are not supported for interleaved stores,
                     GROUP_SIZE is the exact number of stmts in the chain.
                     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
                     there is no interleaving, GROUP_SIZE is 1, and only one
                     iteration of the loop will be executed.  */
                  gcc_assert (next_stmt
                              && gimple_assign_single_p (next_stmt));
                  op = gimple_assign_rhs1 (next_stmt);

                  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
                  dr_chain.quick_push (vec_oprnd);
                  oprnds.quick_push (vec_oprnd);
                  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
                }
            }

          /* We should have caught mismatched types earlier.  */
          gcc_assert (useless_type_conversion_p (vectype,
                                                 TREE_TYPE (vec_oprnd)));
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (TREE_TYPE (ref_type))))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (ref_type, 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type,
                                          simd_lane_access_p ? loop : NULL,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p);
          gcc_assert (bb_vinfo || !inv_p);
        }
      else
        {
          /* For interleaved stores we created vectorized defs for all the
             defs stored in OPRNDS in the previous iteration (previous copy).
             DR_CHAIN is then used as an input to vect_permute_store_chain(),
             and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
             next copy.

             If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
             OPRNDS are of size 1.  */
          for (i = 0; i < group_size; i++)
            {
              op = oprnds[i];
              vect_is_simple_use (op, vinfo, &def_stmt, &dt);
              vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
              dr_chain[i] = vec_oprnd;
              oprnds[i] = vec_oprnd;
            }
          if (dataref_offset)
            dataref_offset
              = int_const_binop (PLUS_EXPR, dataref_offset,
                                 TYPE_SIZE_UNIT (aggr_type));
          else
            dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                           TYPE_SIZE_UNIT (aggr_type));
        }
      if (memory_access_type == VMAT_LOAD_STORE_LANES)
        {
          tree vec_array;

          /* Combine all the vectors into an array.  */
          vec_array = create_vector_array (vectype, vec_num);
          for (i = 0; i < vec_num; i++)
            {
              vec_oprnd = dr_chain[i];
              write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
            }

          /* Emit:
               MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
          gcall *call = gimple_build_call_internal (IFN_STORE_LANES, 1,
                                                    vec_array);
          gimple_call_set_lhs (call, data_ref);
          gimple_call_set_nothrow (call, true);
          new_stmt = call;
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
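          /* Editor's note (an assumption, not in the original sources):
             IFN_STORE_LANES is the internal function behind targets'
             structure-store instructions - on AArch64, for example, a
             two-vector group maps to a single "st2" that stores both
             registers with interleaving - so this path needs no separate
             VEC_PERM_EXPR statements.  */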
        }
      else
        {
          new_stmt = NULL;
          if (grouped_store)
            {
              if (j == 0)
                result_chain.create (group_size);
              /* Permute.  */
              vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
                                        &result_chain);
            }

          next_stmt = first_stmt;
          for (i = 0; i < vec_num; i++)
            {
              unsigned align, misalign;

              if (i > 0)
                /* Bump the vector pointer.  */
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              if (slp)
                vec_oprnd = vec_oprnds[i];
              else if (grouped_store)
                /* For grouped stores vectorized defs are interleaved in
                   vect_permute_store_chain().  */
                vec_oprnd = result_chain[i];

              data_ref = fold_build2 (MEM_REF, vectype,
                                      dataref_ptr,
                                      dataref_offset
                                      ? dataref_offset
                                      : build_int_cst (ref_type, 0));
              align = DR_TARGET_ALIGNMENT (first_dr);
              if (aligned_access_p (first_dr))
                misalign = 0;
              else if (DR_MISALIGNMENT (first_dr) == -1)
                {
                  align = dr_alignment (vect_dr_behavior (first_dr));
                  misalign = 0;
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          align * BITS_PER_UNIT);
                }
              else
                {
                  TREE_TYPE (data_ref)
                    = build_aligned_type (TREE_TYPE (data_ref),
                                          TYPE_ALIGN (elem_type));
                  misalign = DR_MISALIGNMENT (first_dr);
                }
              if (dataref_offset == NULL_TREE
                  && TREE_CODE (dataref_ptr) == SSA_NAME)
                set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
                                        misalign);

              if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  tree perm_dest
                    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
                                                   vectype);
                  tree new_temp = make_ssa_name (perm_dest);

                  /* Generate the permute statement.  */
                  gimple *perm_stmt
                    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
                                           vec_oprnd, perm_mask);
                  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

                  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
                  vec_oprnd = new_temp;
                }

              /* Arguments are ready.  Create the new vector stmt.  */
              new_stmt = gimple_build_assign (data_ref, vec_oprnd);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              if (slp)
                continue;

              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
              if (!next_stmt)
                break;
            }
        }
      if (!slp)
        {
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
    }

  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_const_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
{
  tree mask_type;

  poly_uint64 nunits = sel.length ();
  gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));

  mask_type = build_vector_type (ssizetype, nunits);
  return vec_perm_indices_to_tree (mask_type, sel);
}
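/* Editor's illustration (a sketch, not in the original sources): building
   the element-reversal permutation {3, 2, 1, 0} for a 4-element vector,
   following the same pattern the gather code in this file uses:

     vec_perm_builder sel (4, 4, 1);
     for (int i = 0; i < 4; ++i)
       sel.quick_push (3 - i);
     vec_perm_indices indices (sel, 1, 4);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);

   vect_gen_perm_mask_checked additionally asserts that the target can
   perform the permutation; vect_gen_perm_mask_any leaves that check to
   the caller.  */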
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_const_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
{
  gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
  return vect_gen_perm_mask_any (vectype, sel);
}

/* Given vector variables X and Y, that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
                      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
          tree op2;
          ssa_op_iter i2;
          if (gimple_code (def_stmt) == GIMPLE_PHI)
            return false;
          FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
            {
              gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
              if (!gimple_nop_p (def_stmt2)
                  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
                return false;
            }
          any = true;
        }
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
          && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
        {
          gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
          gsi_remove (&gsi, false);
          gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
        }
    }

  return true;
}
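/* Editor's illustration (a sketch, not in the original sources): for a
   loop-invariant load whose address is itself computed in the loop,

     loop:
       _1 = n_5 + 1;
       x_2 = a[_1];     <-- STMT, loop-invariant

   hoist_defs_of_uses first checks that the definition of _1 can move too
   (it only uses the invariant n_5 and is not a PHI), then relocates
   "_1 = n_5 + 1" to the preheader edge so that vectorizable_load can
   insert the load itself there as well.  */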
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  tree new_temp;
  machine_mode mode;
  gimple *new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int i, j;
  unsigned int group_size;
  poly_uint64 group_gap_adj;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree byte_offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gphi *phi = NULL;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  gimple *first_stmt;
  gimple *first_stmt_for_drptr = NULL;
  bool inv_p;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  poly_uint64 vf;
  tree aggr_type;
  gather_scatter_info gs_info;
  vec_info *vinfo = stmt_info->vinfo;
  tree ref_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;
  /* Is vectorizable load?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != BIT_FIELD_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF
      && TREE_CODE_CLASS (code) != tcc_declaration)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
                   STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot perform implicit CSE when unrolling "
                         "with negative dependence distance\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Aligned load, but unsupported type.\n");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop);
      gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        slp_perm = true;

      /* Invalidate assumptions made by dependence analysis when vectorization
         on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
          && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
          && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
                       STMT_VINFO_MIN_NEG_DIST (stmt_info)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "cannot perform implicit CSE when performing "
                             "group loads with negative dependence distance\n");
          return false;
        }

      /* Similarly when the stmt is a load that is both part of an SLP
         instance and a loop vectorized stmt via the same-dr mechanism
         we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
          && (STMT_SLP_TYPE (stmt_info)
              != STMT_SLP_TYPE (vinfo_for_stmt
                                 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "conflicting SLP types for CSEd load\n");
          return false;
        }
    }

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt, vectype, slp, VLS_LOAD, ncopies,
                            &memory_access_type, &gs_info))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      if (!slp)
        STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, memory_access_type,
                              NULL, NULL);
      return true;
    }

  if (!slp)
    gcc_assert (memory_access_type
                == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (dr);
  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      poly_uint64 gather_off_nunits
        = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      if (known_eq (nunits, gather_off_nunits))
        modifier = NONE;
      else if (known_eq (nunits * 2, gather_off_nunits))
        {
          modifier = WIDEN;

          /* Currently widening gathers are only supported for
             fixed-length vectors.  */
          int count = gather_off_nunits.to_constant ();
          vec_perm_builder sel (count, count, 1);
          for (i = 0; i < count; ++i)
            sel.quick_push (i | (count / 2));

          vec_perm_indices indices (sel, 1, count);
          perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
                                                  indices);
        }
      else if (known_eq (nunits, gather_off_nunits * 2))
        {
          modifier = NARROW;

          /* Currently narrowing gathers are only supported for
             fixed-length vectors.  */
          int count = nunits.to_constant ();
          vec_perm_builder sel (count, count, 1);
          for (i = 0; i < count; ++i)
            sel.quick_push (i < count / 2 ? i : i + count / 2);

          vec_perm_indices indices (sel, 2, count);
          perm_mask = vect_gen_perm_mask_checked (vectype, indices);
        }
      else
        gcc_unreachable ();
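      /* Editor's illustration (a sketch, not in the original sources): in
         the narrowing case with nunits = 8 the selector built above is
         {0, 1, 2, 3, 8, 9, 10, 11}, i.e. it concatenates the low halves of
         two consecutive gather results (each of which carries only half a
         data vector's worth of useful elements) into one full vector.  */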
      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();

      scale = build_int_cst (scaletype, gs_info.scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);
      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gs_info.offset, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
                                    TYPE_VECTOR_SUBPARTS (idxtype)));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gs_info.decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
                                    TYPE_VECTOR_SUBPARTS (rettype)));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      /* Checked by get_load_store_type.  */
      unsigned int const_nunits = nunits.to_constant ();

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
          group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
          ref_type = get_group_alias_ptr_type (first_stmt);
        }
      else
        {
          first_stmt = stmt;
          first_dr = dr;
          group_size = 1;
          ref_type = reference_alias_ptr_type (DR_REF (first_dr));
        }

      stride_base
        = fold_build_pointer_plus
            (DR_BASE_ADDRESS (first_dr),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (DR_OFFSET (first_dr)),
                         convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
       */
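      /* Editor's illustration (a sketch, not in the original sources):
         with VF = 4 and stride = 3 this builds

           for (j = 0; ; j += 12)
             vectemp = {array[j], array[j + 3], array[j + 6], array[j + 9]};
       */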
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
                            build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
                                          &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (ref_type, 0);
      int nloads = const_nunits;
      int lnel = 1;
      tree ltype = TREE_TYPE (vectype);
      tree lvectype = vectype;
      auto_vec<tree> dr_chain;
      if (memory_access_type == VMAT_STRIDED_SLP)
        {
          if (group_size < const_nunits)
            {
              /* First check if vec_init optab supports construction from
                 vector elts directly.  */
              scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
              machine_mode vmode;
              if (mode_for_vector (elmode, group_size).exists (&vmode)
                  && VECTOR_MODE_P (vmode)
                  && (convert_optab_handler (vec_init_optab,
                                             TYPE_MODE (vectype), vmode)
                      != CODE_FOR_nothing))
                {
                  nloads = const_nunits / group_size;
                  lnel = group_size;
                  ltype = build_vector_type (TREE_TYPE (vectype), group_size);
                }
              else
                {
                  /* Otherwise avoid emitting a constructor of vector elements
                     by performing the loads using an integer type of the same
                     size, constructing a vector of those and then
                     re-interpreting it as the original vector type.
                     This avoids a huge runtime penalty due to the general
                     inability to perform store forwarding from smaller stores
                     to a larger load.  */
                  unsigned lsize
                    = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
                  elmode = int_mode_for_size (lsize, 0).require ();
                  unsigned int lnunits = const_nunits / group_size;
                  /* If we can't construct such a vector fall back to
                     element loads of the original vector type.  */
                  if (mode_for_vector (elmode, lnunits).exists (&vmode)
                      && VECTOR_MODE_P (vmode)
                      && (convert_optab_handler (vec_init_optab, vmode, elmode)
                          != CODE_FOR_nothing))
                    {
                      nloads = lnunits;
                      lnel = group_size;
                      ltype = build_nonstandard_integer_type (lsize, 1);
                      lvectype = build_vector_type (ltype, nloads);
                    }
                }
            }
          else
            {
              nloads = 1;
              lnel = const_nunits;
              ltype = vectype;
            }
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
        }
      if (slp)
        {
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            {
              /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
                 variable VF.  */
              unsigned int const_vf = vf.to_constant ();
              ncopies = CEIL (group_size * const_vf, const_nunits);
              dr_chain.create (ncopies);
            }
          else
            ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
        }
      unsigned int group_el = 0;
      unsigned HOST_WIDE_INT
        elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      for (j = 0; j < ncopies; j++)
        {
          if (nloads > 1)
            vec_alloc (v, nloads);
          for (i = 0; i < nloads; i++)
            {
              tree this_off = build_int_cst (TREE_TYPE (alias_off),
                                             group_el * elsz);
              new_stmt = gimple_build_assign (make_ssa_name (ltype),
                                              build2 (MEM_REF, ltype,
                                                      running_off, this_off));
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              if (nloads > 1)
                CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
                                        gimple_assign_lhs (new_stmt));

              group_el += lnel;
              if (! slp
                  || group_el == group_size)
                {
                  tree newoff = copy_ssa_name (running_off);
                  gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                      running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                  group_el = 0;
                }
            }
          if (nloads > 1)
            {
              tree vec_inv = build_constructor (lvectype, v);
              new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
              if (lvectype != vectype)
                {
                  new_stmt = gimple_build_assign (make_ssa_name (vectype),
                                                  VIEW_CONVERT_EXPR,
                                                  build1 (VIEW_CONVERT_EXPR,
                                                          vectype, new_temp));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                }
            }

          if (slp)
            {
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
              else
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (slp_perm)
        {
          unsigned n_perms;
          vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                        slp_node_instance, false, &n_perms);
        }
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      /* For SLP vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
         the data ref pointer on.  */
      if (bb_vinfo)
        first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            {
              /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
                 variable VF.  */
              unsigned int const_vf = vf.to_constant ();
              unsigned int const_nunits = nunits.to_constant ();
              vec_num = CEIL (group_size * const_vf, const_nunits);
              group_gap_adj = vf * group_size - nunits * vec_num;
            }
          else
            {
              vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
              group_gap_adj
                = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
            }
        }
      else
        vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt);
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr));
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (memory_access_type != VMAT_LOAD_STORE_LANES
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
         p2 = p2 + indx * vectype_size
         lsq = *(floor(p2))
         vec_dest = realign_load (msq, lsq, realignment_token)
         indx = indx + 1;
         msq = lsq;
       }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
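  /* Editor's illustration (a sketch, not in the original sources): with
     16-byte vectors of ints and p misaligned by one element, the desired
     vector {x0,x1,x2,x3} straddles two aligned chunks: msq = *(floor(p))
     holds {w,x0,x1,x2} and lsq holds {x3,y,z,t}; realign_load shifts the
     concatenation by the offset encoded in realignment_token to yield
     {x0,x1,x2,x3}, and in the optimized scheme lsq is reused as the next
     iteration's msq.  */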
  if (nested_in_vect_loop
      && !multiple_p (DR_STEP_ALIGNMENT (dr),
                      GET_MODE_SIZE (TYPE_MODE (vectype))))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;

  if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  poly_uint64 group_elt = 0;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (TREE_TYPE (ref_type)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (ref_type, 0);
              inv_p = false;
            }
          else if (first_stmt_for_drptr
                   && first_stmt != first_stmt_for_drptr)
            {
              dataref_ptr
                = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
                                            at_loop, offset, &dummy, gsi,
                                            &ptr_incr, simd_lane_access_p,
                                            &inv_p, byte_offset);
              /* Adjust the pointer by the difference to first_stmt.  */
              data_reference_p ptrdr
                = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
              tree diff = fold_convert (sizetype,
                                        size_binop (MINUS_EXPR,
                                                    DR_INIT (first_dr),
                                                    DR_INIT (ptrdr)));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, diff);
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p,
                                          byte_offset);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);

      if (memory_access_type == VMAT_LOAD_STORE_LANES)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
          gcall *call = gimple_build_call_internal (IFN_LOAD_LANES, 1,
                                                    data_ref);
          gimple_call_set_lhs (call, vec_array);
          gimple_call_set_nothrow (call, true);
          new_stmt = call;
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = fold_build2 (MEM_REF, vectype, dataref_ptr,
                                     dataref_offset
                                     ? dataref_offset
                                     : build_int_cst (ref_type, 0));
                    align = DR_TARGET_ALIGNMENT (dr);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        align = dr_alignment (vect_dr_behavior (first_dr));
                        misalign = 0;
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                align * BITS_PER_UNIT);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE
                        && TREE_CODE (dataref_ptr) == SSA_NAME)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;

                    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      ptr = copy_ssa_name (dataref_ptr);
                    else
                      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
                    unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
                    new_stmt = gimple_build_assign
                                 (ptr, BIT_AND_EXPR, dataref_ptr,
                                  build_int_cst
                                    (TREE_TYPE (dataref_ptr),
                                     -(HOST_WIDE_INT) align));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (ref_type, 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs,
                                       TYPE_SIZE_UNIT (elem_type));
                    bump = size_binop (MINUS_EXPR, bump, size_one_node);
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign
                                 (NULL_TREE, BIT_AND_EXPR, ptr,
                                  build_int_cst
                                    (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
                    ptr = copy_ssa_name (ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (ref_type, 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  {
                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      new_temp = copy_ssa_name (dataref_ptr);
                    else
                      new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
                    unsigned int align = DR_TARGET_ALIGNMENT (first_dr);
                    new_stmt = gimple_build_assign
                      (new_temp, BIT_AND_EXPR, dataref_ptr,
                       build_int_cst (TREE_TYPE (dataref_ptr),
                                      -(HOST_WIDE_INT) align));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, new_temp,
                                build_int_cst (ref_type, 0));
                    break;
                  }
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              /* 3. Handle explicit realignment if necessary/supported.
                    Create in loop:
                      vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
                                                  msq, lsq, realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }

              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }
                      tree tem = copy_ssa_name (scalar_dest);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                      set_vinfo_for_stmt (new_stmt,
                                          new_stmt_vec_info (new_stmt, vinfo));
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                    }
                }

              if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }

              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);

              /* With SLP permutation we load the gaps as well, without
                 we need to skip the gaps after we manage to fully load
                 all elements.  group_gap_adj is GROUP_SIZE here.  */
              group_elt += nunits;
              if (maybe_ne (group_gap_adj, 0U)
                  && !slp_perm
                  && known_eq (group_elt, group_size - group_gap_adj))
                {
                  poly_wide_int bump_val
                    = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
                       * group_gap_adj);
                  tree bump = wide_int_to_tree (sizetype, bump_val);
                  dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                                 stmt, bump);
                  group_elt = 0;
                }
            }
          /* Bump the vector pointer to account for a gap or for excess
             elements loaded for a permuted SLP load.  */
          if (maybe_ne (group_gap_adj, 0U) && slp_perm)
            {
              poly_wide_int bump_val
                = (wi::to_wide (TYPE_SIZE_UNIT (elem_type))
                   * group_gap_adj);
              tree bump = wide_int_to_tree (sizetype, bump_val);
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          unsigned n_perms;
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false,
                                             &n_perms))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (memory_access_type != VMAT_LOAD_STORE_LANES)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo,
                     tree *comp_vectype, enum vect_def_type *dts,
                     tree vectype)
{
  tree lhs, rhs;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
                               &dts[0], comp_vectype)
          || !*comp_vectype
          || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
        return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dts[0], &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
           || TREE_CODE (lhs) == FIXED_CST)
    dts[0] = vect_constant_def;
  else
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dts[1], &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
           || TREE_CODE (rhs) == FIXED_CST)
    dts[1] = vect_constant_def;
  else
    return false;

  if (vectype1 && vectype2
      && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
                   TYPE_VECTOR_SUBPARTS (vectype2)))
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  /* Invariant comparison.  */
  if (! *comp_vectype)
    {
      tree scalar_type = TREE_TYPE (lhs);
      /* If we can widen the comparison to match vectype do so.  */
      if (INTEGRAL_TYPE_P (scalar_type)
          && tree_int_cst_lt (TYPE_SIZE (scalar_type),
                              TYPE_SIZE (TREE_TYPE (vectype))))
        scalar_type = build_nonstandard_integer_type
          (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
           TYPE_UNSIGNED (scalar_type));
      *comp_vectype = get_vectype_for_scalar_type (scalar_type);
    }

  return true;
}
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, cond_expr0 = NULL_TREE, cond_expr1 = NULL_TREE;
  tree then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[4]
    = {vect_unknown_def_type, vect_unknown_def_type,
       vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 4;
  int ncopies;
  enum tree_code code, cond_code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
        return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
          && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
               && reduc_def))
        return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "value used after loop.\n");
          return false;
        }
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo,
                            &comp_vectype, &dts[0], vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dts[2],
                           &vectype1))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dts[3],
                           &vectype2))
    return false;

  if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
    return false;

  if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
    return false;

  masked = !COMPARISON_CLASS_P (cond_expr);
  vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  cond_code = TREE_CODE (cond_expr);
  if (!masked)
    {
      cond_expr0 = TREE_OPERAND (cond_expr, 0);
      cond_expr1 = TREE_OPERAND (cond_expr, 1);
    }

  if (!masked && VECTOR_BOOLEAN_TYPE_P (comp_vectype))
    {
      /* Boolean values may have another representation in vectors
         and therefore we prefer bit operations over comparison for
         them (which also works for scalar masks).  We store opcodes
         to use in bitop1 and bitop2.  Statement is vectorized as
         BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
         depending on bitop1 and bitop2 arity.  */
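      /* Editor's illustration (a sketch, not in the original sources): on
         boolean masks "x > y" is the same as "x & ~y" (true only where x
         is set and y is clear), so GT becomes bitop1 = BIT_NOT_EXPR
         (applied to rhs2) with bitop2 = BIT_AND_EXPR; likewise "x == y"
         becomes ~(x ^ y) via BIT_XOR_EXPR followed by BIT_NOT_EXPR.  */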
      switch (cond_code)
        {
        case GT_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
          break;
        case GE_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
          break;
        case LT_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_AND_EXPR;
          std::swap (cond_expr0, cond_expr1);
          break;
        case LE_EXPR:
          bitop1 = BIT_NOT_EXPR;
          bitop2 = BIT_IOR_EXPR;
          std::swap (cond_expr0, cond_expr1);
          break;
        case NE_EXPR:
          bitop1 = BIT_XOR_EXPR;
          break;
        case EQ_EXPR:
          bitop1 = BIT_XOR_EXPR;
          bitop2 = BIT_NOT_EXPR;
          break;
        default:
          return false;
        }
      cond_code = SSA_NAME;
    }

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      if (bitop1 != NOP_EXPR)
        {
          machine_mode mode = TYPE_MODE (comp_vectype);
          optab optab;

          optab = optab_for_tree_code (bitop1, comp_vectype, optab_default);
          if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
            return false;

          if (bitop2 != NOP_EXPR)
            {
              optab = optab_for_tree_code (bitop2, comp_vectype,
                                           optab_default);
              if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
                return false;
            }
        }
      if (expand_vec_cond_expr_p (vectype, comp_vectype,
                                  cond_code))
        {
          vect_model_simple_cost (stmt_info, ncopies, dts, ndts, NULL, NULL);
          return true;
        }
      return false;
    }
  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      if (masked)
		ops.safe_push (cond_expr);
	      else
		{
		  ops.safe_push (cond_expr0);
		  ops.safe_push (cond_expr1);
		}
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      if (!masked)
		vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      gimple *gtemp;
	      if (masked)
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr, stmt,
						    comp_vectype);
		  vect_is_simple_use (cond_expr, stmt_info->vinfo,
				      &gtemp, &dts[0]);
		}
	      else
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr0,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr0, loop_vinfo, &gtemp, &dts[0]);

		  vec_cond_rhs
		    = vect_get_vec_def_for_operand (cond_expr1,
						    stmt, comp_vectype);
		  vect_is_simple_use (cond_expr1, loop_vinfo, &gtemp, &dts[1]);
		}
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause
		    = vect_get_vec_def_for_operand (then_clause, stmt);
		  vect_is_simple_use (then_clause, loop_vinfo,
				      &gtemp, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause
		    = vect_get_vec_def_for_operand (else_clause, stmt);
		  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs
	    = vect_get_vec_def_for_stmt_copy (dts[0],
					      vec_oprnds0.pop ());
	  if (!masked)
	    vec_cond_rhs
	      = vect_get_vec_def_for_stmt_copy (dts[1],
						vec_oprnds1.pop ());

	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}
      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  if (!masked)
	    vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  if (masked)
	    vec_compare = vec_cond_lhs;
	  else
	    {
	      vec_cond_rhs = vec_oprnds1[i];
	      if (bitop1 == NOP_EXPR)
		vec_compare = build2 (cond_code, vec_cmp_type,
				      vec_cond_lhs, vec_cond_rhs);
	      else
		{
		  new_temp = make_ssa_name (vec_cmp_type);
		  if (bitop1 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (new_temp, bitop1,
						    vec_cond_rhs);
		  else
		    new_stmt
		      = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
					     vec_cond_rhs);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (bitop2 == NOP_EXPR)
		    vec_compare = new_temp;
		  else if (bitop2 == BIT_NOT_EXPR)
		    {
		      /* Instead of doing ~x ? y : z do x ? z : y.  */
		      vec_compare = new_temp;
		      std::swap (vec_then_clause, vec_else_clause);
		    }
		  else
		    {
		      vec_compare = make_ssa_name (vec_cmp_type);
		      new_stmt
			= gimple_build_assign (vec_compare, bitop2,
					       vec_cond_lhs, new_temp);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    }
		}
	    }
	  new_temp = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
					  vec_compare, vec_then_clause,
					  vec_else_clause);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
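
/* Illustrative example (assumed GIMPLE shapes, not taken from an actual
   dump): with ncopies == 1 and no bit operations, a scalar statement

       x_1 = a_2 < b_3 ? c_4 : d_5;

   is replaced by a vector comparison feeding a VEC_COND_EXPR:

       vect_x.0 = VEC_COND_EXPR <vect_a.1 < vect_b.2, vect_c.3, vect_d.4>;  */
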
/* vectorizable_comparison.

   Check if STMT is comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, tree reduc_def,
			 slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ndts = 2;
  poly_uint64 nunits;
  int ncopies;
  enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
			   &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
			   &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
		   TYPE_VECTOR_SUBPARTS (vectype2)))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
      if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
	return false;
    }
  else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
    return false;

  /* Can't compare mask and non-mask types.  */
  if (vectype1 && vectype2
      && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
    return false;
  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    {
      if (code == GT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	}
      else if (code == GE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	}
      else if (code == LT_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_AND_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else if (code == LE_EXPR)
	{
	  bitop1 = BIT_NOT_EXPR;
	  bitop2 = BIT_IOR_EXPR;
	  std::swap (rhs1, rhs2);
	  std::swap (dts[0], dts[1]);
	}
      else
	{
	  bitop1 = BIT_XOR_EXPR;
	  if (code == EQ_EXPR)
	    bitop2 = BIT_NOT_EXPR;
	}
    }
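
  /* Illustrative instance of the table above (an example added for
     exposition, not from the original source): on boolean vectors
     rhs1 == rhs2 is computed as ~(rhs1 ^ rhs2); the binary bitop1
     (BIT_XOR_EXPR) produces the mismatch mask and the unary bitop2
     (BIT_NOT_EXPR) inverts it.  */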
  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies * (1 + (bitop2 != NOP_EXPR)),
			      dts, ndts, NULL, NULL);
      if (bitop1 == NOP_EXPR)
	return expand_vec_cmp_expr_p (vectype, mask_type, code);
      else
	{
	  machine_mode mode = TYPE_MODE (vectype);
	  optab optab;

	  optab = optab_for_tree_code (bitop1, vectype, optab_default);
	  if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
	    return false;

	  if (bitop2 != NOP_EXPR)
	    {
	      optab = optab_for_tree_code (bitop2, vectype, optab_default);
	      if (!optab || optab_handler (optab, mode) == CODE_FOR_nothing)
		return false;
	    }
	  return true;
	}
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);
  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
	    }
	}
      else
	{
	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
						     vec_oprnds1.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);
	}
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	{
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  if (bitop1 == NOP_EXPR)
	    {
	      new_stmt = gimple_build_assign (new_temp, code,
					      vec_rhs1, vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    }
	  else
	    {
	      if (bitop1 == BIT_NOT_EXPR)
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs2);
	      else
		new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
						vec_rhs2);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (bitop2 != NOP_EXPR)
		{
		  tree res = make_ssa_name (mask);
		  if (bitop2 == BIT_NOT_EXPR)
		    new_stmt = gimple_build_assign (res, bitop2, new_temp);
		  else
		    new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
						    new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
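
/* Illustrative example (assumed GIMPLE shapes, not taken from an actual
   dump): for a mask definition

       mask_1 = a_2 < b_3;

   the loop above emits a single vector comparison

       vect_mask.0 = vect_a.1 < vect_b.2;

   whose result has the boolean vector type MASK_TYPE.  */
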
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */

static bool
can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, gimple **vec_stmt)
{
  if (slp_node)
    {
      gimple *slp_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
	{
	  stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
					       vec_stmt))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
	   && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
    return false;

  return true;
}
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
		   slp_instance node_instance)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node,
			      node_instance))
	return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node, node_instance))
		return false;
	    }
	}
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
		  && (relevance == vect_used_in_outer
		      || relevance == vect_used_in_outer_by_reduction
		      || relevance == vect_used_by_reduction
		      || relevance == vect_unused_in_scope
		      || relevance == vect_used_only_live));
      break;

    case vect_induction_def:
      gcc_assert (!bb_vinfo);
      break;

    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }
  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node, node_instance)
	  || vectorizable_induction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else if (bb_vinfo)
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  if (bb_vinfo)
    return true;
8805 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
8806 need extra handling, except for vectorizable reductions. */
8807 if (STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
8808 && !can_vectorize_live_stmts (stmt
, NULL
, node
, NULL
))
8810 if (dump_enabled_p ())
8812 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8813 "not vectorized: live stmt not supported: ");
8814 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
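
/* Sketch of the intended calling convention (an assumption about the
   drivers, which live elsewhere in the vectorizer, not a quote from
   them): analysis and transformation are split, so a driver first calls
   vect_analyze_stmt on every statement and only if the whole loop or
   SLP instance turns out to be vectorizable later calls
   vect_transform_stmt below on those same statements.  */
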
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and there vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;
    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
				     slp_node_instance);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);
  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info)
	     == vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	  {
	    exit_phi = USE_STMT (use_p);
	    STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	  }
    }
  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
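
/* Usage note (an assumption based on the IS_STORE return value and the
   *GROUPED_STORE flag above, not on the callers themselves): callers
   are expected to defer removal of the scalar stores until the whole
   group has been transformed and then delete them with
   vect_remove_stores below.  */
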
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}

/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

tree
get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
{
  tree orig_scalar_type = scalar_type;
  scalar_mode inner_mode;
  machine_mode simd_mode;
  poly_uint64 nunits;
  tree vectype;

  if (!is_int_mode (TYPE_MODE (scalar_type), &inner_mode)
      && !is_float_mode (TYPE_MODE (scalar_type), &inner_mode))
    return NULL_TREE;

  unsigned int nbytes = GET_MODE_SIZE (inner_mode);

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (known_eq (size, 0U))
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else if (!multiple_p (size, nbytes, &nunits)
	   || !mode_for_vector (inner_mode, nunits).exists (&simd_mode))
    return NULL_TREE;
  /* NOTE: nunits == 1 is allowed to support single element vector types.  */
  if (!multiple_p (GET_MODE_SIZE (simd_mode), nbytes, &nunits))
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
  if (TYPE_ADDR_SPACE (orig_scalar_type) != TYPE_ADDR_SPACE (vectype))
    return build_qualified_type
	     (vectype, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type)));

  return vectype;
}
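
/* Worked example (illustrative, not from the original source): for
   SCALAR_TYPE 'int' (SImode, 4 bytes) and SIZE 16, multiple_p gives
   nunits = 4 and the result is a four-element vector type such as one
   with V4SImode; with SIZE 0 the target's preferred SIMD mode for
   SImode determines the number of units instead.  */
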
poly_uint64 current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && known_eq (current_vector_size, 0U))
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}

/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}

/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
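
/* Illustrative example (not from the original source): with
   current_vector_size 16, get_vectype_for_scalar_type ('int') yields a
   four-element vector type, and get_mask_type_for_scalar_type ('int')
   yields the corresponding four-element truth vector type used for
   comparison results.  */
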
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);
:
9496 /* The result of a vectorized widening operation usually requires
9497 two vectors (because the widened results do not fit into one vector).
9498 The generated vector results would normally be expected to be
9499 generated in the same order as in the original scalar computation,
9500 i.e. if 8 results are generated in each vector iteration, they are
9501 to be organized as follows:
9502 vect1: [res1,res2,res3,res4],
9503 vect2: [res5,res6,res7,res8].
9505 However, in the special case that the result of the widening
9506 operation is used in a reduction computation only, the order doesn't
9507 matter (because when vectorizing a reduction we change the order of
9508 the computation). Some targets can take advantage of this and
9509 generate more efficient code. For example, targets like Altivec,
9510 that support widen_mult using a sequence of {mult_even,mult_odd}
9511 generate the following vectors:
9512 vect1: [res1,res3,res5,res7],
9513 vect2: [res2,res4,res6,res8].
9515 When vectorizing outer-loops, we execute the inner-loop sequentially
9516 (each vectorized inner-loop iteration contributes to VF outer-loop
9517 iterations in parallel). We therefore don't allow to change the
9518 order of the computation in the inner-loop during outer-loop
9520 /* TODO: Another case in which order doesn't *really* matter is when we
9521 widen and then contract again, e.g. (short)((int)x * y >> 8).
9522 Normally, pack_trunc performs an even/odd permute, whereas the
9523 repack from an even/odd expansion would be an interleave, which
9524 would be significantly simpler for e.g. AVX2. */
9525 /* In any case, in order to avoid duplicating the code below, recurse
9526 on VEC_WIDEN_MULT_EVEN_EXPR. If it succeeds, all the return values
9527 are properly set up for the caller. If we fail, we'll continue with
9528 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
9530 && STMT_VINFO_RELEVANT (stmt_info
) == vect_used_by_reduction
9531 && !nested_in_vect_loop_p (vect_loop
, stmt
)
9532 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR
,
9533 stmt
, vectype_out
, vectype_in
,
9534 code1
, code2
, multi_step_cvt
,
9537 /* Elements in a vector with vect_used_by_reduction property cannot
9538 be reordered if the use chain with this property does not have the
9539 same operation. One such an example is s += a * b, where elements
9540 in a and b cannot be reordered. Here we check if the vector defined
9541 by STMT is only directly used in the reduction statement. */
9542 tree lhs
= gimple_assign_lhs (stmt
);
9543 use_operand_p dummy
;
9545 stmt_vec_info use_stmt_info
= NULL
;
9546 if (single_imm_use (lhs
, &dummy
, &use_stmt
)
9547 && (use_stmt_info
= vinfo_for_stmt (use_stmt
))
9548 && STMT_VINFO_DEF_TYPE (use_stmt_info
) == vect_reduction_def
)
9551 c1
= VEC_WIDEN_MULT_LO_EXPR
;
9552 c2
= VEC_WIDEN_MULT_HI_EXPR
;
    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);
  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
			 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
9634 prev_type
= vectype
;
9635 prev_mode
= vec_mode
;
9637 if (!CONVERT_EXPR_CODE_P (code
))
9640 /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
9641 intermediate steps in promotion sequence. We try
9642 MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
9644 interm_types
->create (MAX_INTERM_CVT_STEPS
);
9645 for (i
= 0; i
< MAX_INTERM_CVT_STEPS
; i
++)
9647 intermediate_mode
= insn_data
[icode1
].operand
[0].mode
;
9648 if (VECTOR_BOOLEAN_TYPE_P (prev_type
))
9650 poly_uint64 intermediate_nelts
9651 = exact_div (TYPE_VECTOR_SUBPARTS (prev_type
), 2);
9653 = build_truth_vector_type (intermediate_nelts
,
9654 current_vector_size
);
9655 if (intermediate_mode
!= TYPE_MODE (intermediate_type
))
9660 = lang_hooks
.types
.type_for_mode (intermediate_mode
,
9661 TYPE_UNSIGNED (prev_type
));
9663 optab3
= optab_for_tree_code (c1
, intermediate_type
, optab_default
);
9664 optab4
= optab_for_tree_code (c2
, intermediate_type
, optab_default
);
9666 if (!optab3
|| !optab4
9667 || (icode1
= optab_handler (optab1
, prev_mode
)) == CODE_FOR_nothing
9668 || insn_data
[icode1
].operand
[0].mode
!= intermediate_mode
9669 || (icode2
= optab_handler (optab2
, prev_mode
)) == CODE_FOR_nothing
9670 || insn_data
[icode2
].operand
[0].mode
!= intermediate_mode
9671 || ((icode1
= optab_handler (optab3
, intermediate_mode
))
9672 == CODE_FOR_nothing
)
9673 || ((icode2
= optab_handler (optab4
, intermediate_mode
))
9674 == CODE_FOR_nothing
))
9677 interm_types
->quick_push (intermediate_type
);
9678 (*multi_step_cvt
)++;
9680 if (insn_data
[icode1
].operand
[0].mode
== TYPE_MODE (wide_vectype
)
9681 && insn_data
[icode2
].operand
[0].mode
== TYPE_MODE (wide_vectype
))
9682 return (!VECTOR_BOOLEAN_TYPE_P (vectype
)
9683 || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type
),
9684 TYPE_VECTOR_SUBPARTS (wide_vectype
) * 2));
9686 prev_type
= intermediate_type
;
9687 prev_mode
= intermediate_mode
;
9690 interm_types
->release ();
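
/* Worked example (illustrative, not from the original source): widening
   a char vector to an int result usually cannot be done by a single
   VEC_UNPACK step, but char->short->int succeeds with 'short' pushed
   onto INTERM_TYPES and *MULTI_STEP_CVT set to 1, matching the example
   in the function comment above.  */
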
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;
  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
			 TYPE_VECTOR_SUBPARTS (narrow_vectype)));
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }
  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
			     TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();