/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "cgraph.h"
#include "dumpfile.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "builtins.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-ssa-loop-niter.h"
#include "gimple-fold.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

static tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
                                stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
                                misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
        (builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
                          count, kind, stmt_info, misalign, where);
}
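
/* Illustrative usage sketch, not part of the original file: a caller that
   wants to cost NCOPIES vector statements in the loop body, deferring the
   decision to a cost vector, would typically do something like

     unsigned cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                       stmt_info, 0, vect_body);

   where BODY_COST_VEC, NCOPIES and STMT_INFO are assumed to be in scope.
   Passing a null cost vector instead routes the cost straight to the
   target's add_stmt_cost hook.  */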
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
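
/* For illustration only (names are hypothetical): for a two-vector array
   created by create_vector_array, read_vector_array and write_vector_array
   emit statements of roughly the form

     vect_tmp.0 = vect_array[0];     (read_vector_array with n == 0)
     vect_array[1] = vect_tmp.1;     (write_vector_array with n == 1)

   i.e. plain ARRAY_REFs on the temporary array variable.  */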
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "last stmt in pattern. don't mark"
                         " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }
  return true;
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
        && !gimple_clobber_p (stmt))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form).  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- STMT = (S1) op (S2)
     -2- STMT = indexing an array
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt))
        {
          internal_fn ifn = gimple_call_internal_fn (stmt);
          int mask_index = internal_fn_mask_index (ifn);
          if (mask_index >= 0
              && use == gimple_call_arg (stmt, mask_index))
            return true;
          int stored_value_index = internal_fn_stored_value_index (ifn);
          if (stored_value_index >= 0
              && use == gimple_call_arg (stmt, stored_value_index))
            return true;
          if (internal_gather_scatter_fn_p (ifn)
              && use == gimple_call_arg (stmt, 1))
            return true;
        }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
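
/* Worked example (illustrative, not from the original source): for the
   scalar store "a[i_3] = x_5", the use x_5 is the copied value, so the
   function returns true for it, whereas i_3 only appears in the array
   index and the function returns false, meaning i_3 itself need not be
   vectorized.  */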
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
     which does not need to be directly vectorized, then the liveness/relevance
     of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
     skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
     be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
             enum vect_relevant relevant, vec<gimple *> *worklist,
             bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
        case vect_used_only_live:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
           && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
           && ! STMT_VINFO_LIVE_P (stmt_vinfo)
           && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
               == use))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant according to the relevance property
         of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
         propagated as is to the DEF_STMTs of its USEs.

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the relevance to vect_used_by_reduction.
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
        {
        case vect_reduction_def:
          gcc_assert (relevant != vect_unused_in_scope);
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of reduction.\n");
              return false;
            }
          break;

        case vect_nested_cycle:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_outer_by_reduction
              && relevant != vect_used_in_outer)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of nested cycle.\n");
              return false;
            }
          break;

        case vect_double_reduction_def:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of double reduction.\n");
              return false;
            }
          break;

        default:
          break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
              tree op = gimple_assign_rhs1 (stmt);

              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
                                    relevant, &worklist, false)
                      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
                                       relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (stmt); i++)
                {
                  op = gimple_op (stmt, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt, op, loop_vinfo, relevant,
                                       &worklist, false))
                    return false;
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, relevant,
                                    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt, op, loop_vinfo, relevant,
                              &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          gather_scatter_info gs_info;
          if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
            gcc_unreachable ();
          if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
                            &worklist, true))
            return false;
        }
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        int ndts,
                        stmt_vector_for_cost *prologue_cost_vec,
                        stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  gcc_assert (!PURE_SLP_STMT (stmt_info));

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                         stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  gcc_assert (!PURE_SLP_STMT (stmt_info));

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
        (i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
                                    vec_promote_demote, stmt_info, 0,
                                    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmt.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       vect_memory_access_type memory_access_type,
                       vec_load_store_type vls_type, slp_tree slp_node,
                       stmt_vector_for_cost *prologue_cost_vec,
                       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (vls_type == VLS_STORE_INVARIANT)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses high and low interleave or shuffle operations for each
         needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * assumed_nunits,
                                       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * assumed_nunits,
                                       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost,
                     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vector_store, stmt_info, 0,
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
        break;
      }

    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_store, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
                      vect_memory_access_type memory_access_type,
                      slp_tree slp_node,
                      stmt_vector_for_cost *prologue_cost_vec,
                      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses even and odd extract operations or shuffle operations
         for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * assumed_nunits,
                                       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
                        &inside_cost, &prologue_cost,
                        prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
                                     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *prologue_cost,
                    stmt_vector_for_cost *prologue_cost_vec,
                    stmt_vector_for_cost *body_cost_vec,
                    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");
        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_load, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
                                          vector_load, stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vec_perm, stmt_info, 0, vect_body);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           prologue costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
                                            stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");
        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide grouped
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost && record_prologue_costs)
          {
            *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
                                                vector_stmt, stmt_info,
                                                0, vect_prologue);
            if (targetm.vectorize.builtin_mask_for_load)
              *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
                                                  vector_stmt, stmt_info,
                                                  0, vect_prologue);
          }

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
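
/* For illustration (not in the original source): with the
   dr_explicit_realign scheme each vector load is costed as two vector_load
   operations plus one vec_perm, so NCOPIES copies contribute
   2 * NCOPIES + NCOPIES statements to the loop body, plus one extra
   vector_stmt when the target provides builtin_mask_for_load.  */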
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
          basic_block new_bb;
          edge pe;

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
        }
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push something to an SSA name with
     initial value 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
        {
          /* Scalar boolean value should be transformed into
             all zeros or all ones value before building a vector.  */
          if (VECTOR_BOOLEAN_TYPE_P (type))
            {
              tree true_val = build_all_ones_cst (TREE_TYPE (type));
              tree false_val = build_zero_cst (TREE_TYPE (type));

              if (CONSTANT_CLASS_P (val))
                val = integer_zerop (val) ? false_val : true_val;
              else
                {
                  new_temp = make_ssa_name (TREE_TYPE (type));
                  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
                                                   val, true_val, false_val);
                  vect_init_vector_1 (stmt, init_stmt, gsi);
                  val = new_temp;
                }
            }
          else if (CONSTANT_CLASS_P (val))
            val = fold_convert (TREE_TYPE (type), val);
          else
            {
              new_temp = make_ssa_name (TREE_TYPE (type));
              if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
                init_stmt = gimple_build_assign (new_temp,
                                                 fold_build1 (VIEW_CONVERT_EXPR,
                                                              TREE_TYPE (type),
                                                              val));
              else
                init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
              vect_init_vector_1 (stmt, init_stmt, gsi);
              val = new_temp;
            }
        }
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);

        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        /* Get vectorized pattern statement.  */
        if (!vec_stmt
            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
            && !STMT_VINFO_RELEVANT (def_stmt_info))
          vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
                       STMT_VINFO_RELATED_STMT (def_stmt_info)));
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt = ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
        vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
               && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
        vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
   In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:       vectorized into:           STMT_VINFO_RELATED_STMT

   S1: x = load       VS1.0:  vx.0 = memref0     VS1.1
                      VS1.1:  vx.1 = memref1     VS1.2
                      VS1.2:  vx.2 = memref2     VS1.3
                      VS1.3:  vx.3 = memref3

   S2: z = x + ...    VSnew.0:  vz0 = vx.0 + ... VSnew.1
                      VSnew.1:  vz1 = vx.1 + ... VSnew.2
                      VSnew.2:  vz2 = vx.2 + ... VSnew.3
                      VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
   To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

   To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

   For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx.0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 vec<tree> *vec_oprnds0,
                                 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
                   vec<tree> *vec_oprnds0,
                   vec<tree> *vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
        *vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
        {
          vec_oprnds1->create (1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
          vec_oprnds1->quick_push (vec_oprnd);
        }
    }
}
/* Helper function called by vect_finish_replace_stmt and
   vect_finish_stmt_generation.  Set the location of the new
   statement and create a stmt_vec_info for it.  */

static void
vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* Replace the scalar statement STMT with a new vector statement VEC_STMT,
   which sets the same scalar result as STMT did.  */

void
vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt)
{
  gcc_assert (gimple_get_lhs (stmt) == gimple_get_lhs (vec_stmt));

  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gsi_replace (&gsi, vec_stmt, false);

  vect_finish_stmt_generation_1 (stmt, vec_stmt);
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
        {
          tree vdef = gimple_vdef (at_stmt);
          gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
          /* If we have an SSA vuse and insert a store, update virtual
             SSA form to avoid triggering the renamer.  Do so only
             if we can easily see all uses - which is what almost always
             happens with the way vectorized stmts are inserted.  */
          if ((vdef && TREE_CODE (vdef) == SSA_NAME)
              && ((is_gimple_assign (vec_stmt)
                   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
                  || (is_gimple_call (vec_stmt)
                      && !(gimple_call_flags (vec_stmt)
                           & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
            {
              tree new_vdef = copy_ssa_name (vuse, vec_stmt);
              gimple_set_vdef (vec_stmt, new_vdef);
              SET_USE (gimple_vuse_op (at_stmt), new_vdef);
            }
        }
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
  vect_finish_stmt_generation_1 (stmt, vec_stmt);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
                                tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
        {
          tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
          tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
          if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
                                              OPTIMIZE_FOR_SPEED))
            return ifn;
        }
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
                                  gimple_stmt_iterator *);
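
/* For example (illustrative only): a scalar call to the sqrt built-in has a
   combined function code that associated_internal_fn maps to IFN_SQRT; if
   direct_internal_fn_supported_p reports that the target handles IFN_SQRT on
   the chosen vector type, that code is returned, otherwise IFN_LAST and the
   caller falls back to other strategies.  */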
/* Check whether a load or store statement in the loop described by
   LOOP_VINFO is possible in a fully-masked loop.  This is testing
   whether the vectorizer pass has the appropriate support, as well as
   whether the target does.

   VLS_TYPE says whether the statement is a load or store and VECTYPE
   is the type of the vector being loaded or stored.  MEMORY_ACCESS_TYPE
   says how the load or store is going to be implemented and GROUP_SIZE
   is the number of load or store statements in the containing group.
   If the access is a gather load or scatter store, GS_INFO describes
   its arguments.

   Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
   supported, otherwise record the required mask types.  */

static void
check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
                          vec_load_store_type vls_type, int group_size,
                          vect_memory_access_type memory_access_type,
                          gather_scatter_info *gs_info)
{
  /* Invariant loads need no special support.  */
  if (memory_access_type == VMAT_INVARIANT)
    return;

  vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
  machine_mode vecmode = TYPE_MODE (vectype);
  bool is_load = (vls_type == VLS_LOAD);
  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    {
      if (is_load
          ? !vect_load_lanes_supported (vectype, group_size, true)
          : !vect_store_lanes_supported (vectype, group_size, true))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "can't use a fully-masked loop because the"
                             " target doesn't have an appropriate masked"
                             " load/store-lanes instruction.\n");
          LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
          return;
        }
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      internal_fn ifn = (is_load
                         ? IFN_MASK_GATHER_LOAD
                         : IFN_MASK_SCATTER_STORE);
      tree offset_type = TREE_TYPE (gs_info->offset);
      if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
                                                   gs_info->memory_type,
                                                   TYPE_SIGN (offset_type),
                                                   gs_info->scale))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "can't use a fully-masked loop because the"
                             " target doesn't have an appropriate masked"
                             " gather load or scatter store instruction.\n");
          LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
          return;
        }
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type != VMAT_CONTIGUOUS
      && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Element X of the data must come from iteration i * VF + X of the
         scalar loop.  We need more work to support other mappings.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't use a fully-masked loop because an access"
                         " isn't contiguous.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }

  machine_mode mask_mode;
  if (!(targetm.vectorize.get_mask_mode
        (GET_MODE_NUNITS (vecmode),
         GET_MODE_SIZE (vecmode)).exists (&mask_mode))
      || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't use a fully-masked loop because the target"
                         " doesn't have the appropriate masked load or"
                         " store.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }
  /* We might load more scalars than we need for permuting SLP loads.
     We checked in get_group_load_store_type that the extra elements
     don't leak into a new vector.  */
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int nvectors;
  if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
    vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
  else
    gcc_unreachable ();
}
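
/* Worked example for the final step above (illustrative): with
   group_size == 2, a vectorization factor of 8 and nunits == 4, the group
   needs ceil (2 * 8 / 4) == 4 mask vectors, so NVECTORS is 4 and four masks
   for VECTYPE are recorded with vect_record_loop_mask.  */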
/* Return the mask input to a masked load or store.  VEC_MASK is the vectorized
   form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
   that needs to be applied to all loads and stores in a vectorized loop.
   Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.

   MASK_TYPE is the type of both masks.  If new statements are needed,
   insert them before GSI.  */

static tree
prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
                         gimple_stmt_iterator *gsi)
{
  gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
  if (!loop_mask)
    return vec_mask;

  gcc_assert (TREE_TYPE (loop_mask) == mask_type);
  tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
  gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
                                          vec_mask, loop_mask);
  gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
  return and_res;
}
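
/* The statement emitted here is simply (illustrative naming):

     vec_mask_and_1 = vec_mask_2 & loop_mask_3;

   so a fully-masked load or store sees the conjunction of the scalar
   condition's mask and the loop's governing mask.  */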
/* Determine whether we can use a gather load or scatter store to vectorize
   strided load or store STMT by truncating the current offset to a smaller
   width.  We need to be able to construct an offset vector:

     { 0, X, X*2, X*3, ... }

   without loss of precision, where X is STMT's DR_STEP.

   Return true if this is possible, describing the gather load or scatter
   store in GS_INFO.  MASKED_P is true if the load or store is conditional.  */

static bool
vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
				     bool masked_p,
				     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree step = DR_STEP (dr);
  if (TREE_CODE (step) != INTEGER_CST)
    {
      /* ??? Perhaps we could use range information here?  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "cannot truncate variable step.\n");
      return false;
    }

  /* Get the number of bits in an element.  */
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);

  /* Set COUNT to the upper limit on the number of elements - 1.
     Start with the maximum vectorization factor.  */
  unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;

  /* Try lowering COUNT to the number of scalar latch iterations.  */
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  widest_int max_iters;
  if (max_loop_iterations (loop, &max_iters)
      && max_iters < count)
    count = max_iters.to_shwi ();

  /* Try scales of 1 and the element size.  */
  int scales[] = { 1, vect_get_scalar_dr_size (dr) };
  bool overflow_p = false;
  for (int i = 0; i < 2; ++i)
    {
      int scale = scales[i];
      widest_int factor;
      if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
	continue;

      /* See whether we can calculate (COUNT - 1) * STEP / SCALE
	 in OFFSET_BITS bits.  */
      widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
      if (overflow_p)
	continue;
      signop sign = range >= 0 ? UNSIGNED : SIGNED;
      if (wi::min_precision (range, sign) > element_bits)
	{
	  overflow_p = true;
	  continue;
	}

      /* See whether the target supports the operation.  */
      tree memory_type = TREE_TYPE (DR_REF (dr));
      if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
				     memory_type, element_bits, sign, scale,
				     &gs_info->ifn, &gs_info->element_type))
	continue;

      tree offset_type = build_nonstandard_integer_type (element_bits,
							  sign == UNSIGNED);

      gs_info->decl = NULL_TREE;
      /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
	 but we don't need to store that here.  */
      gs_info->base = NULL_TREE;
      gs_info->offset = fold_convert (offset_type, step);
      gs_info->offset_dt = vect_constant_def;
      gs_info->offset_vectype = NULL_TREE;
      gs_info->scale = scale;
      gs_info->memory_type = memory_type;
      return true;
    }

  if (overflow_p && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "truncating gather/scatter offset to %d bits"
		     " might change its value.\n", element_bits);

  return false;
}
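
/* A hypothetical example of the truncation test above: for a strided access
   with DR_STEP == 4, 32-bit elements (ELEMENT_BITS == 32) and SCALE == 4,
   the factor is 4 / 4 == 1, so the largest offset is COUNT * 1, which fits
   comfortably in 32 bits, and offsets { 0, 1, 2, 3, ... } can be used with
   a scale of 4.  With SCALE == 1 the offsets would instead be
   { 0, 4, 8, 12, ... }.  The concrete numbers are illustrative only.  */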
/* Return true if we can use gather/scatter internal functions to
   vectorize STMT, which is a grouped or strided load or store.
   MASKED_P is true if load or store is conditional.  When returning
   true, fill in GS_INFO with the information required to perform the
   operation.  */

static bool
vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
				    bool masked_p,
				    gather_scatter_info *gs_info)
{
  if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)
      || gs_info->decl)
    return vect_truncate_gather_scatter_offset (stmt, loop_vinfo,
						masked_p, gs_info);

  scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
  tree offset_type = TREE_TYPE (gs_info->offset);
  unsigned int offset_bits = TYPE_PRECISION (offset_type);

  /* Enforced by vect_check_gather_scatter.  */
  gcc_assert (element_bits >= offset_bits);

  /* If the elements are wider than the offset, convert the offset to the
     same width, without changing its sign.  */
  if (element_bits > offset_bits)
    {
      bool unsigned_p = TYPE_UNSIGNED (offset_type);
      offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
      gs_info->offset = fold_convert (offset_type, gs_info->offset);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "using gather/scatter for strided/grouped access,"
		     " scale = %d\n", gs_info->scale);

  return true;
}
/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* The encoding has a single stepped pattern.  */
  vec_perm_builder sel (nunits, 1, 3);
  for (int i = 0; i < 3; ++i)
    sel.quick_push (nunits - 1 - i);

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}
/* STMT is either a masked or unconditional store.  Return the value
   being stored.  */

static tree
vect_get_store_rhs (gimple *stmt)
{
  if (gassign *assign = dyn_cast <gassign *> (stmt))
    {
      gcc_assert (gimple_assign_single_p (assign));
      return gimple_assign_rhs1 (assign);
    }
  if (gcall *call = dyn_cast <gcall *> (stmt))
    {
      internal_fn ifn = gimple_call_internal_fn (call);
      int index = internal_fn_stored_value_index (ifn);
      gcc_assert (index >= 0);
      return gimple_call_arg (stmt, index);
    }
  gcc_unreachable ();
}
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
			   bool masked_p, vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type,
			   gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
			   && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (!masked_p
			&& vls_type == VLS_LOAD
			&& loop_vinfo
			&& !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
	  if (multiple_p (nunits, group_size))
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
	  if (overrun_p
	      && gap < (vect_known_alignment_in_bytes (first_dr)
			/ vect_get_scalar_dr_size (first_dr)))
	    overrun_p = false;
	  if (overrun_p && !can_overrun_p)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }
	  *memory_access_type = VMAT_CONTIGUOUS;
	}
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
	 but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
	 would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* An overrun is fine if the trailing elements are smaller than the
	 alignment boundary B.  Every vector access will be a multiple of B
	 and so we are guaranteed to access a non-gap element in the
	 same B-sized block.  */
      if (would_overrun_p
	  && !masked_p
	  && gap < (vect_known_alignment_in_bytes (first_dr)
		    / vect_get_scalar_dr_size (first_dr)))
	would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
	  && (can_overrun_p || !would_overrun_p)
	  && compare_step_with_zero (stmt) > 0)
	{
	  /* First cope with the degenerate case of a single-element
	     vector.  */
	  if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
	    *memory_access_type = VMAT_CONTIGUOUS;

	  /* Otherwise try using LOAD/STORE_LANES.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_load_lanes_supported (vectype, group_size, masked_p)
		  : vect_store_lanes_supported (vectype, group_size,
						masked_p)))
	    {
	      *memory_access_type = VMAT_LOAD_STORE_LANES;
	      overrun_p = would_overrun_p;
	    }

	  /* If that fails, try using permuting loads.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_grouped_load_supported (vectype, single_element_p,
						 group_size)
		  : vect_grouped_store_supported (vectype, group_size)))
	    {
	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
	      overrun_p = would_overrun_p;
	    }
	}

      /* As a last resort, trying using a gather load or scatter store.

	 ??? Although the code can handle all group sizes correctly,
	 it probably isn't a win to use separate strided accesses based
	 on nearby locations.  Or, even if it's a win over scalar code,
	 it might not be a win over vectorizing at a lower VF, if that
	 allows us to use contiguous accesses.  */
      if (*memory_access_type == VMAT_ELEMENTWISE
	  && single_element_p
	  && loop_vinfo
	  && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
						 masked_p, gs_info))
	*memory_access_type = VMAT_GATHER_SCATTER;
    }

  if (vls_type != VLS_LOAD && first_stmt == stmt)
    {
      /* STMT is the leader of the group.  Check the operands of all the
	 stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt)
	{
	  tree op = vect_get_store_rhs (next_stmt);
	  gimple *def_stmt;
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
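
/* A hypothetical example of the overrun reasoning above: a load group with
   GROUP_SIZE == 2 and GROUP_GAP == 1 loaded with 4-element vectors reads
   past the final scalar access in the last vector iteration.  If the known
   alignment of the first access covers at least 2 elements, the extra
   element stays inside an already-accessible block and the overrun is
   harmless; otherwise it is only allowed when we can peel a scalar epilogue
   (LOOP_VINFO_PEELING_FOR_GAPS).  The numbers are illustrative only.  */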
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
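
/* For instance (values are only illustrative), a load with DR_STEP == -4 on
   4-byte elements and NCOPIES == 1 is handled as VMAT_CONTIGUOUS_REVERSE:
   the vector is loaded from the lowest address of the accessed range and
   then reversed with the { 3, 2, 1, 0 } mask from perm_mask_for_reverse,
   provided the target supports that permutation and any required
   alignment.  */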
/* Analyze load or store statement STMT of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   MASKED_P is true if the statement is conditional on a vectorized mask.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
		     vec_load_store_type vls_type, unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      gimple *def_stmt;
      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt, vectype, slp, masked_p, vls_type,
				      memory_access_type, gs_info))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      if (loop_vinfo
	  && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
						 masked_p, gs_info))
	*memory_access_type = VMAT_GATHER_SCATTER;
      else
	*memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  if ((*memory_access_type == VMAT_ELEMENTWISE
       || *memory_access_type == VMAT_STRIDED_SLP)
      && !nunits.is_constant ())
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Not using elementwise accesses due to variable "
			 "vectorization factor.\n");
      return false;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info)
      && !(stmt == GROUP_FIRST_ELEMENT (stmt_info)
	   && !GROUP_NEXT_ELEMENT (stmt_info)
	   && !pow2p_hwi (GROUP_SIZE (stmt_info))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }

  return true;
}
/* Return true if boolean argument MASK is suitable for vectorizing
   conditional load or store STMT.  When returning true, store the type
   of the definition in *MASK_DT_OUT and the type of the vectorized mask
   in *MASK_VECTYPE_OUT.  */

static bool
vect_check_load_store_mask (gimple *stmt, tree mask,
			    vect_def_type *mask_dt_out,
			    tree *mask_vectype_out)
{
  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not a boolean.\n");
      return false;
    }

  if (TREE_CODE (mask) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not an SSA name.\n");
      return false;
    }

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree mask_vectype;
  enum vect_def_type mask_dt;
  gimple *def_stmt;
  if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &mask_dt,
			   &mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask use not simple.\n");
      return false;
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "could not find an appropriate vector mask type.\n");
      return false;
    }

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
		TYPE_VECTOR_SUBPARTS (vectype)))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vector mask type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION,
		       " does not match vector data type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
	}
      return false;
    }

  *mask_dt_out = mask_dt;
  *mask_vectype_out = mask_vectype;
  return true;
}
/* Return true if stored value RHS is suitable for vectorizing store
   statement STMT.  When returning true, store the type of the
   definition in *RHS_DT_OUT, the type of the vectorized store value in
   *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT.  */

static bool
vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
		      tree *rhs_vectype_out, vec_load_store_type *vls_type_out)
{
  /* In the case this is a store from a constant make sure
     native_encode_expr can handle it.  */
  if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cannot encode constant as a byte sequence.\n");
      return false;
    }

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree rhs_vectype;
  enum vect_def_type rhs_dt;
  gimple *def_stmt;
  if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &rhs_dt,
			   &rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "incompatible vector types.\n");
      return false;
    }

  *rhs_dt_out = rhs_dt;
  *rhs_vectype_out = rhs_vectype;
  if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
    *vls_type_out = VLS_STORE_INVARIANT;
  else
    *vls_type_out = VLS_STORE;
  return true;
}
/* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT.
   Note that we support masks with floating-point type, in which case the
   floats are interpreted as a bitmask.  */

static tree
vect_build_all_ones_mask (gimple *stmt, tree masktype)
{
  if (TREE_CODE (masktype) == INTEGER_TYPE)
    return build_int_cst (masktype, -1);
  else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
    {
      tree mask = build_int_cst (TREE_TYPE (masktype), -1);
      mask = build_vector_from_val (masktype, mask);
      return vect_init_vector (stmt, mask, masktype, NULL);
    }
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
    {
      REAL_VALUE_TYPE r;
      long tmp[6];
      for (int j = 0; j < 6; ++j)
	tmp[j] = -1;
      real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
      tree mask = build_real (TREE_TYPE (masktype), r);
      mask = build_vector_from_val (masktype, mask);
      return vect_init_vector (stmt, mask, masktype, NULL);
    }
  gcc_unreachable ();
}
/* Build an all-zero merge value of type VECTYPE while vectorizing
   STMT as a gather load.  */

static tree
vect_build_zero_merge_argument (gimple *stmt, tree vectype)
{
  tree merge;
  if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
    merge = build_int_cst (TREE_TYPE (vectype), 0);
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
    {
      REAL_VALUE_TYPE r;
      long tmp[6];
      for (int j = 0; j < 6; ++j)
	tmp[j] = 0;
      real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
      merge = build_real (TREE_TYPE (vectype), r);
    }
  else
    gcc_unreachable ();
  merge = build_vector_from_val (vectype, merge);
  return vect_init_vector (stmt, merge, vectype, NULL);
}
/* Build a gather load call while vectorizing STMT.  Insert new instructions
   before GSI and add them to VEC_STMT.  GS_INFO describes the gather load
   operation.  If the load is conditional, MASK is the unvectorized
   condition and MASK_DT is its definition type, otherwise MASK is null.  */

static void
vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, gather_scatter_info *gs_info,
			      tree mask, vect_def_type mask_dt)
{
2559 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2560 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2561 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2562 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2563 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
2564 int ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
2565 edge pe
= loop_preheader_edge (loop
);
2566 enum { NARROW
, NONE
, WIDEN
} modifier
;
2567 poly_uint64 gather_off_nunits
2568 = TYPE_VECTOR_SUBPARTS (gs_info
->offset_vectype
);
2570 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
->decl
));
2571 tree rettype
= TREE_TYPE (TREE_TYPE (gs_info
->decl
));
2572 tree srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2573 tree ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2574 tree idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2575 tree masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2576 tree scaletype
= TREE_VALUE (arglist
);
2577 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
2578 && (!mask
|| types_compatible_p (srctype
, masktype
)));
2580 tree perm_mask
= NULL_TREE
;
2581 tree mask_perm_mask
= NULL_TREE
;
2582 if (known_eq (nunits
, gather_off_nunits
))
2584 else if (known_eq (nunits
* 2, gather_off_nunits
))
2588 /* Currently widening gathers and scatters are only supported for
2589 fixed-length vectors. */
2590 int count
= gather_off_nunits
.to_constant ();
2591 vec_perm_builder
sel (count
, count
, 1);
2592 for (int i
= 0; i
< count
; ++i
)
2593 sel
.quick_push (i
| (count
/ 2));
2595 vec_perm_indices
indices (sel
, 1, count
);
2596 perm_mask
= vect_gen_perm_mask_checked (gs_info
->offset_vectype
,
2599 else if (known_eq (nunits
, gather_off_nunits
* 2))
2603 /* Currently narrowing gathers and scatters are only supported for
2604 fixed-length vectors. */
2605 int count
= nunits
.to_constant ();
2606 vec_perm_builder
sel (count
, count
, 1);
2607 sel
.quick_grow (count
);
2608 for (int i
= 0; i
< count
; ++i
)
2609 sel
[i
] = i
< count
/ 2 ? i
: i
+ count
/ 2;
2610 vec_perm_indices
indices (sel
, 2, count
);
2611 perm_mask
= vect_gen_perm_mask_checked (vectype
, indices
);
2617 for (int i
= 0; i
< count
; ++i
)
2618 sel
[i
] = i
| (count
/ 2);
2619 indices
.new_vector (sel
, 2, count
);
2620 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, indices
);
2626 tree vec_dest
= vect_create_destination_var (gimple_get_lhs (stmt
),
2629 tree ptr
= fold_convert (ptrtype
, gs_info
->base
);
2630 if (!is_gimple_min_invariant (ptr
))
2633 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
2634 basic_block new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
2635 gcc_assert (!new_bb
);
2638 tree scale
= build_int_cst (scaletype
, gs_info
->scale
);
2640 tree vec_oprnd0
= NULL_TREE
;
2641 tree vec_mask
= NULL_TREE
;
2642 tree src_op
= NULL_TREE
;
2643 tree mask_op
= NULL_TREE
;
2644 tree prev_res
= NULL_TREE
;
2645 stmt_vec_info prev_stmt_info
= NULL
;
2649 src_op
= vect_build_zero_merge_argument (stmt
, rettype
);
2650 mask_op
= vect_build_all_ones_mask (stmt
, masktype
);
2653 for (int j
= 0; j
< ncopies
; ++j
)
2657 if (modifier
== WIDEN
&& (j
& 1))
2658 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
2659 perm_mask
, stmt
, gsi
);
2662 = vect_get_vec_def_for_operand (gs_info
->offset
, stmt
);
2665 = vect_get_vec_def_for_stmt_copy (gs_info
->offset_dt
, vec_oprnd0
);
2667 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
2669 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
)),
2670 TYPE_VECTOR_SUBPARTS (idxtype
)));
2671 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
2672 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
2673 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2674 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2680 if (mask_perm_mask
&& (j
& 1))
2681 mask_op
= permute_vec_elements (mask_op
, mask_op
,
2682 mask_perm_mask
, stmt
, gsi
);
2686 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
2688 vec_mask
= vect_get_vec_def_for_stmt_copy (mask_dt
, vec_mask
);
2691 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
2694 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
)),
2695 TYPE_VECTOR_SUBPARTS (masktype
)));
2696 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
2697 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
2698 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
,
2700 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2707 new_stmt
= gimple_build_call (gs_info
->decl
, 5, src_op
, ptr
, op
,
2710 if (!useless_type_conversion_p (vectype
, rettype
))
2712 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype
),
2713 TYPE_VECTOR_SUBPARTS (rettype
)));
2714 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
2715 gimple_call_set_lhs (new_stmt
, op
);
2716 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2717 var
= make_ssa_name (vec_dest
);
2718 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
2719 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2723 var
= make_ssa_name (vec_dest
, new_stmt
);
2724 gimple_call_set_lhs (new_stmt
, var
);
2727 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2729 if (modifier
== NARROW
)
2736 var
= permute_vec_elements (prev_res
, var
, perm_mask
, stmt
, gsi
);
2737 new_stmt
= SSA_NAME_DEF_STMT (var
);
2740 if (prev_stmt_info
== NULL
)
2741 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2743 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2744 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
/* Prepare the base and offset in GS_INFO for vectorization.
   Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
   to the vectorized offset argument for the first copy of STMT.  STMT
   is the statement described by GS_INFO and LOOP is the containing loop.  */

static void
vect_get_gather_scatter_ops (struct loop *loop, gimple *stmt,
			     gather_scatter_info *gs_info,
			     tree *dataref_ptr, tree *vec_offset)
{
  gimple_seq stmts = NULL;
  *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
  if (stmts != NULL)
    {
      basic_block new_bb;
      edge pe = loop_preheader_edge (loop);
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }

  tree offset_type = TREE_TYPE (gs_info->offset);
  tree offset_vectype = get_vectype_for_scalar_type (offset_type);
  *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt,
					      offset_vectype);
}
/* Prepare to implement a grouped or strided load or store using
   the gather load or scatter store operation described by GS_INFO.
   STMT is the load or store statement.

   Set *DATAREF_BUMP to the amount that should be added to the base
   address after each copy of the vectorized statement.  Set *VEC_OFFSET
   to an invariant offset vector in which element I has the value
   I * DR_STEP / SCALE.  */

static void
vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo,
				 gather_scatter_info *gs_info,
				 tree *dataref_bump, tree *vec_offset)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  gimple_seq stmts;

  tree bump = size_binop (MULT_EXPR,
			  fold_convert (sizetype, DR_STEP (dr)),
			  size_int (TYPE_VECTOR_SUBPARTS (vectype)));
  *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  /* The offset given in GS_INFO can have pointer type, so use the element
     type of the vector instead.  */
  tree offset_type = TREE_TYPE (gs_info->offset);
  tree offset_vectype = get_vectype_for_scalar_type (offset_type);
  offset_type = TREE_TYPE (offset_vectype);

  /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type.  */
  tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
			  ssize_int (gs_info->scale));
  step = fold_convert (offset_type, step);
  step = force_gimple_operand (step, &stmts, true, NULL_TREE);

  /* Create {0, X, X*2, X*3, ...}.  */
  *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
			      build_zero_cst (offset_type), step);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
}
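
/* As a worked example with made-up numbers: for DR_STEP == 16,
   gs_info->scale == 4 and a 4-element offset vector, X is 16 / 4 == 4,
   so *VEC_OFFSET becomes the VEC_SERIES { 0, 4, 8, 12 } and *DATAREF_BUMP
   is 16 * 4 == 64 bytes per copy of the vectorized statement.  */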
/* Return the amount that should be added to a vector pointer to move
   to the next or previous copy of AGGR_TYPE.  DR is the data reference
   being vectorized and MEMORY_ACCESS_TYPE describes the type of
   vectorization.  */

static tree
vect_get_data_ptr_increment (data_reference *dr, tree aggr_type,
			     vect_memory_access_type memory_access_type)
{
  if (memory_access_type == VMAT_INVARIANT)
    return size_zero_node;

  tree iv_step = TYPE_SIZE_UNIT (aggr_type);
  tree step = vect_dr_behavior (dr)->step;
  if (tree_int_cst_sgn (step) == -1)
    iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
  return iv_step;
}
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}.  */

static bool
vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node,
		    tree vectype_in, enum vect_def_type *dt)
{
  tree op, vectype;
2846 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2847 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2849 unsigned HOST_WIDE_INT nunits
, num_bytes
;
2851 op
= gimple_call_arg (stmt
, 0);
2852 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2854 if (!TYPE_VECTOR_SUBPARTS (vectype
).is_constant (&nunits
))
2857 /* Multiple types in SLP are handled by creating the appropriate number of
2858 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2863 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
2865 gcc_assert (ncopies
>= 1);
2867 tree char_vectype
= get_same_sized_vectype (char_type_node
, vectype_in
);
2871 if (!TYPE_VECTOR_SUBPARTS (char_vectype
).is_constant (&num_bytes
))
2874 unsigned word_bytes
= num_bytes
/ nunits
;
2876 /* The encoding uses one stepped pattern for each byte in the word. */
2877 vec_perm_builder
elts (num_bytes
, word_bytes
, 3);
2878 for (unsigned i
= 0; i
< 3; ++i
)
2879 for (unsigned j
= 0; j
< word_bytes
; ++j
)
2880 elts
.quick_push ((i
+ 1) * word_bytes
- j
- 1);
2882 vec_perm_indices
indices (elts
, 1, num_bytes
);
2883 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype
), indices
))
2888 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2889 if (dump_enabled_p ())
2890 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_bswap ==="
2894 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
,
2895 1, vector_stmt
, stmt_info
, 0, vect_prologue
);
2896 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
,
2897 ncopies
, vec_perm
, stmt_info
, 0, vect_body
);
2902 tree bswap_vconst
= vec_perm_indices_to_tree (char_vectype
, indices
);
2905 vec
<tree
> vec_oprnds
= vNULL
;
2906 gimple
*new_stmt
= NULL
;
2907 stmt_vec_info prev_stmt_info
= NULL
;
2908 for (unsigned j
= 0; j
< ncopies
; j
++)
2912 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
);
2914 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
2916 /* Arguments are ready. create the new vector stmt. */
2919 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
2921 tree tem
= make_ssa_name (char_vectype
);
2922 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
2923 char_vectype
, vop
));
2924 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2925 tree tem2
= make_ssa_name (char_vectype
);
2926 new_stmt
= gimple_build_assign (tem2
, VEC_PERM_EXPR
,
2927 tem
, tem
, bswap_vconst
);
2928 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2929 tem
= make_ssa_name (vectype
);
2930 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
2932 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2934 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2941 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2943 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2945 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2948 vec_oprnds
.release ();
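
/* As an illustrative sketch, for BUILT_IN_BSWAP32 on a 16-byte vector the
   code above views the operand as 16 chars and applies the byte selector

     { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }

   (word_bytes == 4), i.e. each 4-byte word is reversed in place before the
   result is view-converted back to the original vector type.  */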
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
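
/* For example, narrowing a V4SI input to a V8HI output (hypothetical modes
   chosen purely for illustration) succeeds when the target provides a
   single VEC_PACK_TRUNC_EXPR for those modes; if
   supportable_narrowing_operation would need an intermediate type
   (multi_step_cvt != 0), the function above deliberately fails so callers
   fall back to other strategies.  */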
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
2993 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
2994 stmt_vec_info stmt_info
= vinfo_for_stmt (gs
), prev_stmt_info
;
2995 tree vectype_out
, vectype_in
;
2996 poly_uint64 nunits_in
;
2997 poly_uint64 nunits_out
;
2998 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2999 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3000 vec_info
*vinfo
= stmt_info
->vinfo
;
3001 tree fndecl
, new_temp
, rhs_type
;
3003 enum vect_def_type dt
[3]
3004 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
3006 gimple
*new_stmt
= NULL
;
3008 vec
<tree
> vargs
= vNULL
;
3009 enum { NARROW
, NONE
, WIDEN
} modifier
;
3013 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3016 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3020 /* Is GS a vectorizable call? */
3021 stmt
= dyn_cast
<gcall
*> (gs
);
3025 if (gimple_call_internal_p (stmt
)
3026 && (internal_load_fn_p (gimple_call_internal_fn (stmt
))
3027 || internal_store_fn_p (gimple_call_internal_fn (stmt
))))
3028 /* Handled by vectorizable_load and vectorizable_store. */
3031 if (gimple_call_lhs (stmt
) == NULL_TREE
3032 || TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3035 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3037 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
3039 /* Process function arguments. */
3040 rhs_type
= NULL_TREE
;
3041 vectype_in
= NULL_TREE
;
3042 nargs
= gimple_call_num_args (stmt
);
3044 /* Bail out if the function has more than three arguments, we do not have
3045 interesting builtin functions to vectorize with more than two arguments
3046 except for fma. No arguments is also not good. */
3047 if (nargs
== 0 || nargs
> 3)
3050 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3051 if (gimple_call_internal_p (stmt
)
3052 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
3055 rhs_type
= unsigned_type_node
;
3058 for (i
= 0; i
< nargs
; i
++)
3062 op
= gimple_call_arg (stmt
, i
);
3064 /* We can only handle calls with arguments of the same type. */
3066 && !types_compatible_p (rhs_type
, TREE_TYPE (op
)))
3068 if (dump_enabled_p ())
3069 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3070 "argument types differ.\n");
3074 rhs_type
= TREE_TYPE (op
);
3076 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[i
], &opvectype
))
3078 if (dump_enabled_p ())
3079 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3080 "use not simple.\n");
3085 vectype_in
= opvectype
;
3087 && opvectype
!= vectype_in
)
3089 if (dump_enabled_p ())
3090 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3091 "argument vector types differ.\n");
3095 /* If all arguments are external or constant defs use a vector type with
3096 the same size as the output vector type. */
3098 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
3100 gcc_assert (vectype_in
);
3103 if (dump_enabled_p ())
3105 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3106 "no vectype for scalar type ");
3107 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3108 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3115 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
3116 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
3117 if (known_eq (nunits_in
* 2, nunits_out
))
3119 else if (known_eq (nunits_out
, nunits_in
))
3121 else if (known_eq (nunits_out
* 2, nunits_in
))
3126 /* We only handle functions that do not read or clobber memory. */
3127 if (gimple_vuse (stmt
))
3129 if (dump_enabled_p ())
3130 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3131 "function reads from or writes to memory.\n");
3135 /* For now, we only vectorize functions if a target specific builtin
3136 is available. TODO -- in some cases, it might be profitable to
3137 insert the calls for pieces of the vector, in order to be able
3138 to vectorize other operations in the loop. */
3140 internal_fn ifn
= IFN_LAST
;
3141 combined_fn cfn
= gimple_call_combined_fn (stmt
);
3142 tree callee
= gimple_call_fndecl (stmt
);
3144 /* First try using an internal function. */
3145 tree_code convert_code
= ERROR_MARK
;
3147 && (modifier
== NONE
3148 || (modifier
== NARROW
3149 && simple_integer_narrowing (vectype_out
, vectype_in
,
3151 ifn
= vectorizable_internal_function (cfn
, callee
, vectype_out
,
3154 /* If that fails, try asking for a target-specific built-in function. */
3155 if (ifn
== IFN_LAST
)
3157 if (cfn
!= CFN_LAST
)
3158 fndecl
= targetm
.vectorize
.builtin_vectorized_function
3159 (cfn
, vectype_out
, vectype_in
);
3161 fndecl
= targetm
.vectorize
.builtin_md_vectorized_function
3162 (callee
, vectype_out
, vectype_in
);
3165 if (ifn
== IFN_LAST
&& !fndecl
)
3167 if (cfn
== CFN_GOMP_SIMD_LANE
3170 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
3171 && TREE_CODE (gimple_call_arg (stmt
, 0)) == SSA_NAME
3172 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
3173 == SSA_NAME_VAR (gimple_call_arg (stmt
, 0)))
3175 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3176 { 0, 1, 2, ... vf - 1 } vector. */
3177 gcc_assert (nargs
== 0);
3179 else if (modifier
== NONE
3180 && (gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP16
)
3181 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP32
)
3182 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP64
)))
3183 return vectorizable_bswap (stmt
, gsi
, vec_stmt
, slp_node
,
3187 if (dump_enabled_p ())
3188 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3189 "function is not vectorizable.\n");
3196 else if (modifier
== NARROW
&& ifn
== IFN_LAST
)
3197 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_out
);
3199 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
3201 /* Sanity check: make sure that at least one copy of the vectorized stmt
3202 needs to be generated. */
3203 gcc_assert (ncopies
>= 1);
3205 if (!vec_stmt
) /* transformation not required. */
3207 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
3208 if (dump_enabled_p ())
3209 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_call ==="
3213 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
3214 if (ifn
!= IFN_LAST
&& modifier
== NARROW
&& !slp_node
)
3215 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
, ncopies
/ 2,
3216 vec_promote_demote
, stmt_info
, 0, vect_body
);
3224 if (dump_enabled_p ())
3225 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
3228 scalar_dest
= gimple_call_lhs (stmt
);
3229 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
3231 prev_stmt_info
= NULL
;
3232 if (modifier
== NONE
|| ifn
!= IFN_LAST
)
3234 tree prev_res
= NULL_TREE
;
3235 for (j
= 0; j
< ncopies
; ++j
)
3237 /* Build argument list for the vectorized call. */
3239 vargs
.create (nargs
);
3245 auto_vec
<vec
<tree
> > vec_defs (nargs
);
3246 vec
<tree
> vec_oprnds0
;
3248 for (i
= 0; i
< nargs
; i
++)
3249 vargs
.quick_push (gimple_call_arg (stmt
, i
));
3250 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3251 vec_oprnds0
= vec_defs
[0];
3253 /* Arguments are ready. Create the new vector stmt. */
3254 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_oprnd0
)
3257 for (k
= 0; k
< nargs
; k
++)
3259 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3260 vargs
[k
] = vec_oprndsk
[i
];
3262 if (modifier
== NARROW
)
3264 tree half_res
= make_ssa_name (vectype_in
);
3266 = gimple_build_call_internal_vec (ifn
, vargs
);
3267 gimple_call_set_lhs (call
, half_res
);
3268 gimple_call_set_nothrow (call
, true);
3270 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3273 prev_res
= half_res
;
3276 new_temp
= make_ssa_name (vec_dest
);
3277 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
3278 prev_res
, half_res
);
3283 if (ifn
!= IFN_LAST
)
3284 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3286 call
= gimple_build_call_vec (fndecl
, vargs
);
3287 new_temp
= make_ssa_name (vec_dest
, call
);
3288 gimple_call_set_lhs (call
, new_temp
);
3289 gimple_call_set_nothrow (call
, true);
3292 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3293 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3296 for (i
= 0; i
< nargs
; i
++)
3298 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3299 vec_oprndsi
.release ();
3304 for (i
= 0; i
< nargs
; i
++)
3306 op
= gimple_call_arg (stmt
, i
);
3309 = vect_get_vec_def_for_operand (op
, stmt
);
3312 vec_oprnd0
= gimple_call_arg (new_stmt
, i
);
3314 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3317 vargs
.quick_push (vec_oprnd0
);
3320 if (gimple_call_internal_p (stmt
)
3321 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
3323 tree cst
= build_index_vector (vectype_out
, j
* nunits_out
, 1);
3325 = vect_get_new_ssa_name (vectype_out
, vect_simple_var
, "cst_");
3326 gimple
*init_stmt
= gimple_build_assign (new_var
, cst
);
3327 vect_init_vector_1 (stmt
, init_stmt
, NULL
);
3328 new_temp
= make_ssa_name (vec_dest
);
3329 new_stmt
= gimple_build_assign (new_temp
, new_var
);
3331 else if (modifier
== NARROW
)
3333 tree half_res
= make_ssa_name (vectype_in
);
3334 gcall
*call
= gimple_build_call_internal_vec (ifn
, vargs
);
3335 gimple_call_set_lhs (call
, half_res
);
3336 gimple_call_set_nothrow (call
, true);
3338 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3341 prev_res
= half_res
;
3344 new_temp
= make_ssa_name (vec_dest
);
3345 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
3346 prev_res
, half_res
);
3351 if (ifn
!= IFN_LAST
)
3352 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3354 call
= gimple_build_call_vec (fndecl
, vargs
);
3355 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3356 gimple_call_set_lhs (call
, new_temp
);
3357 gimple_call_set_nothrow (call
, true);
3360 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3362 if (j
== (modifier
== NARROW
? 1 : 0))
3363 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3365 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3367 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3370 else if (modifier
== NARROW
)
3372 for (j
= 0; j
< ncopies
; ++j
)
3374 /* Build argument list for the vectorized call. */
3376 vargs
.create (nargs
* 2);
3382 auto_vec
<vec
<tree
> > vec_defs (nargs
);
3383 vec
<tree
> vec_oprnds0
;
3385 for (i
= 0; i
< nargs
; i
++)
3386 vargs
.quick_push (gimple_call_arg (stmt
, i
));
3387 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3388 vec_oprnds0
= vec_defs
[0];
3390 /* Arguments are ready. Create the new vector stmt. */
3391 for (i
= 0; vec_oprnds0
.iterate (i
, &vec_oprnd0
); i
+= 2)
3395 for (k
= 0; k
< nargs
; k
++)
3397 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3398 vargs
.quick_push (vec_oprndsk
[i
]);
3399 vargs
.quick_push (vec_oprndsk
[i
+ 1]);
3402 if (ifn
!= IFN_LAST
)
3403 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3405 call
= gimple_build_call_vec (fndecl
, vargs
);
3406 new_temp
= make_ssa_name (vec_dest
, call
);
3407 gimple_call_set_lhs (call
, new_temp
);
3408 gimple_call_set_nothrow (call
, true);
3410 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3411 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3414 for (i
= 0; i
< nargs
; i
++)
3416 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3417 vec_oprndsi
.release ();
3422 for (i
= 0; i
< nargs
; i
++)
3424 op
= gimple_call_arg (stmt
, i
);
3428 = vect_get_vec_def_for_operand (op
, stmt
);
3430 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3434 vec_oprnd1
= gimple_call_arg (new_stmt
, 2*i
+ 1);
3436 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd1
);
3438 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3441 vargs
.quick_push (vec_oprnd0
);
3442 vargs
.quick_push (vec_oprnd1
);
3445 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3446 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3447 gimple_call_set_lhs (new_stmt
, new_temp
);
3448 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3451 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
3453 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3455 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3458 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3461 /* No current target implements this case. */
3466 /* The call in STMT might prevent it from being removed in dce.
3467 We however cannot remove it here, due to the way the ssa name
3468 it defines is mapped to the new definition. So just replace
3469 rhs of the statement with something harmless. */
3474 type
= TREE_TYPE (scalar_dest
);
3475 if (is_pattern_stmt_p (stmt_info
))
3476 lhs
= gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info
));
3478 lhs
= gimple_call_lhs (stmt
);
3480 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (type
));
3481 set_vinfo_for_stmt (new_stmt
, stmt_info
);
3482 set_vinfo_for_stmt (stmt
, NULL
);
3483 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
3484 gsi_replace (gsi
, new_stmt
, false);
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  HOST_WIDE_INT linear_step;
  enum vect_def_type dt;
  unsigned int align;
  bool simd_lane_linear;
};
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
3508 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
3510 if (!is_gimple_assign (def_stmt
)
3511 || gimple_assign_rhs_code (def_stmt
) != POINTER_PLUS_EXPR
3512 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt
)))
3515 tree base
= gimple_assign_rhs1 (def_stmt
);
3516 HOST_WIDE_INT linear_step
= 0;
3517 tree v
= gimple_assign_rhs2 (def_stmt
);
3518 while (TREE_CODE (v
) == SSA_NAME
)
3521 def_stmt
= SSA_NAME_DEF_STMT (v
);
3522 if (is_gimple_assign (def_stmt
))
3523 switch (gimple_assign_rhs_code (def_stmt
))
3526 t
= gimple_assign_rhs2 (def_stmt
);
3527 if (linear_step
|| TREE_CODE (t
) != INTEGER_CST
)
3529 base
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (base
), base
, t
);
3530 v
= gimple_assign_rhs1 (def_stmt
);
3533 t
= gimple_assign_rhs2 (def_stmt
);
3534 if (linear_step
|| !tree_fits_shwi_p (t
) || integer_zerop (t
))
3536 linear_step
= tree_to_shwi (t
);
3537 v
= gimple_assign_rhs1 (def_stmt
);
3540 t
= gimple_assign_rhs1 (def_stmt
);
3541 if (TREE_CODE (TREE_TYPE (t
)) != INTEGER_TYPE
3542 || (TYPE_PRECISION (TREE_TYPE (v
))
3543 < TYPE_PRECISION (TREE_TYPE (t
))))
3552 else if (gimple_call_internal_p (def_stmt
, IFN_GOMP_SIMD_LANE
)
3554 && TREE_CODE (gimple_call_arg (def_stmt
, 0)) == SSA_NAME
3555 && (SSA_NAME_VAR (gimple_call_arg (def_stmt
, 0))
3560 arginfo
->linear_step
= linear_step
;
3562 arginfo
->simd_lane_linear
= true;
/* Return the number of elements in vector type VECTYPE, which is associated
   with a SIMD clone.  At present these vectors always have a constant
   length.  */

static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
3593 tree vec_oprnd0
= NULL_TREE
;
3594 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
), prev_stmt_info
;
3596 unsigned int nunits
;
3597 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3598 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3599 vec_info
*vinfo
= stmt_info
->vinfo
;
3600 struct loop
*loop
= loop_vinfo
? LOOP_VINFO_LOOP (loop_vinfo
) : NULL
;
3601 tree fndecl
, new_temp
;
3603 gimple
*new_stmt
= NULL
;
3605 auto_vec
<simd_call_arg_info
> arginfo
;
3606 vec
<tree
> vargs
= vNULL
;
3608 tree lhs
, rtype
, ratype
;
3609 vec
<constructor_elt
, va_gc
> *ret_ctor_elts
= NULL
;
3611 /* Is STMT a vectorizable call? */
3612 if (!is_gimple_call (stmt
))
3615 fndecl
= gimple_call_fndecl (stmt
);
3616 if (fndecl
== NULL_TREE
)
3619 struct cgraph_node
*node
= cgraph_node::get (fndecl
);
3620 if (node
== NULL
|| node
->simd_clones
== NULL
)
3623 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3626 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3630 if (gimple_call_lhs (stmt
)
3631 && TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3634 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3636 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3638 if (loop_vinfo
&& nested_in_vect_loop_p (loop
, stmt
))
3645 /* Process function arguments. */
3646 nargs
= gimple_call_num_args (stmt
);
3648 /* Bail out if the function has zero arguments. */
3652 arginfo
.reserve (nargs
, true);
3654 for (i
= 0; i
< nargs
; i
++)
3656 simd_call_arg_info thisarginfo
;
3659 thisarginfo
.linear_step
= 0;
3660 thisarginfo
.align
= 0;
3661 thisarginfo
.op
= NULL_TREE
;
3662 thisarginfo
.simd_lane_linear
= false;
3664 op
= gimple_call_arg (stmt
, i
);
3665 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &thisarginfo
.dt
,
3666 &thisarginfo
.vectype
)
3667 || thisarginfo
.dt
== vect_uninitialized_def
)
3669 if (dump_enabled_p ())
3670 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3671 "use not simple.\n");
3675 if (thisarginfo
.dt
== vect_constant_def
3676 || thisarginfo
.dt
== vect_external_def
)
3677 gcc_assert (thisarginfo
.vectype
== NULL_TREE
);
3679 gcc_assert (thisarginfo
.vectype
!= NULL_TREE
);
3681 /* For linear arguments, the analyze phase should have saved
3682 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3683 if (i
* 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).length ()
3684 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2])
3686 gcc_assert (vec_stmt
);
3687 thisarginfo
.linear_step
3688 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2]);
3690 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 1];
3691 thisarginfo
.simd_lane_linear
3692 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 3]
3693 == boolean_true_node
);
3694 /* If loop has been peeled for alignment, we need to adjust it. */
3695 tree n1
= LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo
);
3696 tree n2
= LOOP_VINFO_NITERS (loop_vinfo
);
3697 if (n1
!= n2
&& !thisarginfo
.simd_lane_linear
)
3699 tree bias
= fold_build2 (MINUS_EXPR
, TREE_TYPE (n1
), n1
, n2
);
3700 tree step
= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2];
3701 tree opt
= TREE_TYPE (thisarginfo
.op
);
3702 bias
= fold_convert (TREE_TYPE (step
), bias
);
3703 bias
= fold_build2 (MULT_EXPR
, TREE_TYPE (step
), bias
, step
);
3705 = fold_build2 (POINTER_TYPE_P (opt
)
3706 ? POINTER_PLUS_EXPR
: PLUS_EXPR
, opt
,
3707 thisarginfo
.op
, bias
);
3711 && thisarginfo
.dt
!= vect_constant_def
3712 && thisarginfo
.dt
!= vect_external_def
3714 && TREE_CODE (op
) == SSA_NAME
3715 && simple_iv (loop
, loop_containing_stmt (stmt
), op
,
3717 && tree_fits_shwi_p (iv
.step
))
3719 thisarginfo
.linear_step
= tree_to_shwi (iv
.step
);
3720 thisarginfo
.op
= iv
.base
;
3722 else if ((thisarginfo
.dt
== vect_constant_def
3723 || thisarginfo
.dt
== vect_external_def
)
3724 && POINTER_TYPE_P (TREE_TYPE (op
)))
3725 thisarginfo
.align
= get_pointer_alignment (op
) / BITS_PER_UNIT
;
3726 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3728 if (POINTER_TYPE_P (TREE_TYPE (op
))
3729 && !thisarginfo
.linear_step
3731 && thisarginfo
.dt
!= vect_constant_def
3732 && thisarginfo
.dt
!= vect_external_def
3735 && TREE_CODE (op
) == SSA_NAME
)
3736 vect_simd_lane_linear (op
, loop
, &thisarginfo
);
3738 arginfo
.quick_push (thisarginfo
);
3741 unsigned HOST_WIDE_INT vf
;
3742 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo
).is_constant (&vf
))
3744 if (dump_enabled_p ())
3745 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3746 "not considering SIMD clones; not yet supported"
3747 " for variable-width vectors.\n");
3751 unsigned int badness
= 0;
3752 struct cgraph_node
*bestn
= NULL
;
3753 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).exists ())
3754 bestn
= cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[0]);
3756 for (struct cgraph_node
*n
= node
->simd_clones
; n
!= NULL
;
3757 n
= n
->simdclone
->next_clone
)
3759 unsigned int this_badness
= 0;
3760 if (n
->simdclone
->simdlen
> vf
3761 || n
->simdclone
->nargs
!= nargs
)
3763 if (n
->simdclone
->simdlen
< vf
)
3764 this_badness
+= (exact_log2 (vf
)
3765 - exact_log2 (n
->simdclone
->simdlen
)) * 1024;
3766 if (n
->simdclone
->inbranch
)
3767 this_badness
+= 2048;
3768 int target_badness
= targetm
.simd_clone
.usable (n
);
3769 if (target_badness
< 0)
3771 this_badness
+= target_badness
* 512;
3772 /* FORNOW: Have to add code to add the mask argument. */
3773 if (n
->simdclone
->inbranch
)
3775 for (i
= 0; i
< nargs
; i
++)
3777 switch (n
->simdclone
->args
[i
].arg_type
)
3779 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3780 if (!useless_type_conversion_p
3781 (n
->simdclone
->args
[i
].orig_type
,
3782 TREE_TYPE (gimple_call_arg (stmt
, i
))))
3784 else if (arginfo
[i
].dt
== vect_constant_def
3785 || arginfo
[i
].dt
== vect_external_def
3786 || arginfo
[i
].linear_step
)
3789 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3790 if (arginfo
[i
].dt
!= vect_constant_def
3791 && arginfo
[i
].dt
!= vect_external_def
)
3794 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3795 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
3796 if (arginfo
[i
].dt
== vect_constant_def
3797 || arginfo
[i
].dt
== vect_external_def
3798 || (arginfo
[i
].linear_step
3799 != n
->simdclone
->args
[i
].linear_step
))
3802 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3803 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
3804 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
3805 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
3806 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
3807 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
3811 case SIMD_CLONE_ARG_TYPE_MASK
:
3814 if (i
== (size_t) -1)
3816 if (n
->simdclone
->args
[i
].alignment
> arginfo
[i
].align
)
3821 if (arginfo
[i
].align
)
3822 this_badness
+= (exact_log2 (arginfo
[i
].align
)
3823 - exact_log2 (n
->simdclone
->args
[i
].alignment
));
3825 if (i
== (size_t) -1)
3827 if (bestn
== NULL
|| this_badness
< badness
)
3830 badness
= this_badness
;
  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								      i)));
	if (arginfo[i].vectype == NULL
	    || (simd_clone_subparts (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  return false;
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = vf / nunits;
  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    return false;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if ((bestn->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	    || (bestn->simdclone->args[i].arg_type
		== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      return true;
    }
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      vargs.create (nargs);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / simd_clone_subparts (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (simd_clone_subparts (atype)
		      < simd_clone_subparts (arginfo[i].vectype))
		    {
		      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (simd_clone_subparts (arginfo[i].vectype)
			   / simd_clone_subparts (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  bitsize_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (simd_clone_subparts (atype)
			   / simd_clone_subparts (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt, loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || simd_clone_subparts (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (simd_clone_subparts (vectype)
		   == simd_clone_subparts (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);
      if (vec_dest)
	{
	  if (simd_clone_subparts (vectype) < nunits)
	    {
	      unsigned int k, l;
	      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
	      k = nunits / simd_clone_subparts (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t), l * bytes));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				bitsize_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (simd_clone_subparts (vectype) > nunits)
	    {
	      unsigned int k = (simd_clone_subparts (vectype)
				/ simd_clone_subparts (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / simd_clone_subparts (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
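
/* Illustrative, standalone sketch (assumptions: GCC vector extensions and
   a split of the widened result into a "low" and a "high" half; all names
   are made up).  It shows the shape of a widening operation emitted as two
   halves, which is what the helper above produces as two vector stmts.
   Compile separately with "gcc -O2 sketch.c".  */
#if 0
#include <stdio.h>

typedef short v8hi __attribute__ ((vector_size (16)));  /* 8 x short  */
typedef int   v4si __attribute__ ((vector_size (16)));  /* 4 x int    */

/* "Low" half of the widened result: lanes 0..3 sign-extended.  */
static v4si widen_lo (v8hi x)
{
  v4si r;
  for (int i = 0; i < 4; i++)
    r[i] = x[i];
  return r;
}

/* "High" half of the widened result: lanes 4..7 sign-extended.  */
static v4si widen_hi (v8hi x)
{
  v4si r;
  for (int i = 0; i < 4; i++)
    r[i] = x[i + 4];
  return r;
}

int main (void)
{
  v8hi x = { -1, 2, -3, 4, -5, 6, -7, 8 };
  v4si lo = widen_lo (x), hi = widen_hi (x);
  printf ("%d %d %d %d | %d %d %d %d\n",
	  lo[0], lo[1], lo[2], lo[3], hi[0], hi[1], hi[2], hi[3]);
  return 0;
}
#endif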
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the
   function recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i / 2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i + 1) / 2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
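
/* Illustrative, standalone sketch (GCC vector extensions; all names made
   up): the demotion above consumes operands pairwise, which corresponds to
   packing two int vectors into one short vector with truncation
   (VEC_PACK_TRUNC_EXPR).  Compile separately with "gcc -O2 sketch.c".  */
#if 0
#include <stdio.h>

typedef int   v4si __attribute__ ((vector_size (16)));
typedef short v8hi __attribute__ ((vector_size (16)));

/* Pack-with-truncation of two 4 x int vectors into one 8 x short vector,
   mirroring how the loop above pairs (*vec_oprnds)[i] and [i + 1].  */
static v8hi pack_trunc (v4si a, v4si b)
{
  v8hi r;
  for (int i = 0; i < 4; i++)
    {
      r[i] = (short) a[i];
      r[i + 4] = (short) b[i];
    }
  return r;
}

int main (void)
{
  v4si a = { 0x10001, 2, 3, 4 }, b = { 5, 6, 7, 0x20008 };
  v8hi r = pack_trunc (a, b);
  for (int i = 0; i < 8; i++)
    printf ("%d ", r[i]);
  printf ("\n");   /* 1 2 3 4 5 6 7 8 -- high bits truncated.  */
  return 0;
}
#endif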
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
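
/* Illustrative, standalone sketch (GCC vector extensions; all names made
   up): a binary widening operation, here a widening multiply of shorts,
   yields two result vectors per input pair, which is why the loop above
   pushes new_tmp1 and new_tmp2 and ends up with twice as many operands.
   Compile separately with "gcc -O2 sketch.c".  */
#if 0
#include <stdio.h>

typedef short v8hi __attribute__ ((vector_size (16)));
typedef int   v4si __attribute__ ((vector_size (16)));

/* Widening multiply, low lanes 0..3.  */
static v4si widen_mult_lo (v8hi a, v8hi b)
{
  v4si r;
  for (int i = 0; i < 4; i++)
    r[i] = (int) a[i] * b[i];
  return r;
}

/* Widening multiply, high lanes 4..7.  */
static v4si widen_mult_hi (v8hi a, v8hi b)
{
  v4si r;
  for (int i = 0; i < 4; i++)
    r[i] = (int) a[i + 4] * b[i + 4];
  return r;
}

int main (void)
{
  v8hi a = { 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000 };
  v8hi b = { 10, 10, 10, 10, 10, 10, 10, 10 };
  v4si lo = widen_mult_lo (a, b), hi = widen_mult_hi (a, b);
  printf ("%d %d %d %d %d %d %d %d\n",
	  lo[0], lo[1], lo[2], lo[3], hi[0], hi[1], hi[2], hi[3]);
  return 0;
}
#endif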
/* Check if STMT performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  tree vectype_out, vectype_in;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && !type_has_mode_precision_p (lhs_type))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && !type_has_mode_precision_p (rhs_type))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant def use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "can't convert between boolean and non "
			   "boolean vectors ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (known_eq (nunits_out, nunits_in))
    modifier = NONE;
  else if (multiple_p (nunits_out, nunits_in))
    modifier = WIDEN;
  else
    {
      gcc_checking_assert (multiple_p (nunits_in, nunits_out));
      modifier = NARROW;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = vect_get_num_copies (loop_vinfo, vectype_out);
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype_in);

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  bool found_mode = false;
  scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
  scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
  opt_scalar_mode rhs_mode_iter;

  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || GET_MODE_SIZE (lhs_mode) <= GET_MODE_SIZE (rhs_mode))
	goto unsupported;

      fltsz = GET_MODE_SIZE (lhs_mode);
      FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
	{
	  rhs_mode = rhs_mode_iter.require ();
	  if (GET_MODE_SIZE (rhs_mode) > fltsz)
	    break;

	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    {
	      found_mode = true;
	      break;
	    }
	}

      if (!found_mode)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || GET_MODE_SIZE (lhs_mode) >= GET_MODE_SIZE (rhs_mode))
	goto unsupported;

      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);
  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  auto_vec<tree> vec_dsts (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;

    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  interm_types.release ();

  return true;
}
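
/* Illustrative, standalone sketch (GCC vector extensions; all names made
   up): a FLOAT_EXPR conversion whose source is much narrower than its
   destination goes through an intermediate integer type, i.e. a widening
   step followed by the real int->float conversion -- the cvt_type /
   interm_types machinery above.  Compile separately with
   "gcc -O2 sketch.c".  */
#if 0
#include <stdio.h>

typedef signed char v4qi __attribute__ ((vector_size (4)));
typedef int         v4si __attribute__ ((vector_size (16)));
typedef float       v4sf __attribute__ ((vector_size (16)));

/* Step 1: widen the chars to the intermediate integer type.  */
static v4si widen_qi_to_si (v4qi x)
{
  v4si r;
  for (int i = 0; i < 4; i++)
    r[i] = x[i];
  return r;
}

/* Step 2: the actual int -> float conversion on full-width elements.  */
static v4sf float_si (v4si x)
{
  v4sf r;
  for (int i = 0; i < 4; i++)
    r[i] = (float) x[i];
  return r;
}

int main (void)
{
  v4qi x = { -1, 2, -3, 4 };
  v4sf f = float_si (widen_qi_to_si (x));
  printf ("%g %g %g %g\n", (double) f[0], (double) f[1],
	  (double) f[2], (double) f[3]);
  return 0;
}
#endif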
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt[1] = {vect_unknown_def_type};
  vec<tree> vec_oprnds = vNULL;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
	  || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
		       GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
	  || !type_has_mode_precision_p (TREE_TYPE (op)))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
	 a simple assignment in case their vectypes are same
	 boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
	  || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle uses.  */
  for (j = 0; j < ncopies; j++)
    {
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
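
/* Illustrative, standalone sketch (GCC vector extensions; all names made
   up): an assignment, including a NOP conversion between same-sized types,
   is vectorized as a plain copy of the vector value, possibly through a
   bit-preserving reinterpretation (the VIEW_CONVERT_EXPR above).
   Compile separately with "gcc -O2 sketch.c".  */
#if 0
#include <stdio.h>
#include <string.h>

typedef int      v4si __attribute__ ((vector_size (16)));
typedef unsigned v4su __attribute__ ((vector_size (16)));

/* Bit-preserving reinterpretation of a 4 x int vector as 4 x unsigned:
   no lane is changed, only the type of the value.  */
static v4su view_convert (v4si x)
{
  v4su r;
  memcpy (&r, &x, sizeof r);
  return r;
}

int main (void)
{
  v4si a = { -1, 2, -3, 4 };
  v4su b = view_convert (a);          /* "vectorized assignment" of a  */
  printf ("%u %u %u %u\n", b[0], b[1], b[2], b[3]);
  return 0;
}
#endif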
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
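
/* Illustrative, standalone sketch (GCC vector extensions; all names made
   up): the probe above distinguishes targets that shift every lane by one
   scalar amount (optab_scalar) from targets that take a per-lane amount
   vector (optab_vector); both forms are shown here in plain C.
   Compile separately with "gcc -O2 sketch.c".  */
#if 0
#include <stdio.h>

typedef unsigned v4su __attribute__ ((vector_size (16)));

/* Vector shifted by a scalar: one count for all lanes.  */
static v4su shl_by_scalar (v4su x, int n)
{
  return x << n;                     /* scalar count broadcast to all lanes */
}

/* Vector shifted by a vector: an independent count per lane.  */
static v4su shl_by_vector (v4su x, v4su n)
{
  return x << n;                     /* lane-wise shift counts */
}

int main (void)
{
  v4su x = { 1, 1, 1, 1 };
  v4su n = { 0, 1, 2, 3 };
  v4su a = shl_by_scalar (x, 4);
  v4su b = shl_by_vector (x, n);
  printf ("%u %u %u %u | %u %u %u %u\n",
	  a[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3]);
  return 0;
}
#endif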
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node)
{
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  machine_mode optab_op2_mode;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (maybe_ne (nunits_out, nunits_in))
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}

      /* If the shift amount is computed by a pattern stmt we cannot
	 use the scalar amount directly thus give up and use a vector
	 shift.  */
      if (dt[1] == vect_internal_def)
	{
	  gimple *def = SSA_NAME_DEF_STMT (op1);
	  if (is_pattern_stmt_p (vinfo_for_stmt (def)))
	    scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unusable type for last operand in"
			     " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
	  && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector/scalar shift/rotate found.\n");
	}
      else
	{
	  optab = optab_for_tree_code (code, vectype, optab_vector);
	  if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
	    {
	      scalar_shift_arg = false;

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vector/vector shift/rotate found.\n");

	      /* Unlike the other binary operators, shifts/rotates have
		 the rhs being int, instead of the same type as the lhs,
		 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
	      if (dt[1] == vect_constant_def)
		op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
					 "unusable type for last operand in"
					 " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
	    }
	}
    }
  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
	  || (!vec_stmt
	      && !vect_worthwhile_without_simd_p (vinfo, code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!vec_stmt
      && !VECTOR_MODE_P (TYPE_MODE (vectype))
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }
  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (scalar_shift_arg)
	    {
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or loop
		 invariant op1 directly, without extending it to vector mode
		 first.  */
	      optab_op2_mode = insn_data[icode].operand[2].mode;
	      if (!VECTOR_MODE_P (optab_op2_mode))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "operand 1 using scalar mode.\n");
		  vec_oprnd1 = op1;
		  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
		  vec_oprnds1.quick_push (vec_oprnd1);
		  if (slp_node)
		    {
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.
			 TODO: Allow different constants for different vector
			 stmts generated for an SLP instance.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }

	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kind of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
	  if (vec_oprnd1)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node);
	  else
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node);
	}
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = vec_oprnds1[i];
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
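
/* Illustrative, standalone sketch (GCC vector extensions; all names made
   up): when the vectorization factor exceeds the lanes per vector, ncopies
   vector shifts are emitted per scalar statement, all reusing the same
   loop-invariant count -- the vec_oprnds1 handling above.  Compile
   separately with "gcc -O2 sketch.c".  */
#if 0
#include <stdio.h>
#include <string.h>

typedef unsigned v4su __attribute__ ((vector_size (16)));

int main (void)
{
  unsigned a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }, b[8];
  int count = 3;                       /* invariant shift amount */

  /* VF = 8, nunits = 4 => ncopies = 2 vector shifts per iteration.  */
  for (int i = 0; i < 8; i += 8)
    {
      v4su v0, v1;
      memcpy (&v0, &a[i], sizeof v0);
      memcpy (&v1, &a[i + 4], sizeof v1);
      v0 = v0 << count;                /* copy 0 */
      v1 = v1 << count;                /* copy 1 */
      memcpy (&b[i], &v0, sizeof v0);
      memcpy (&b[i + 4], &v1, sizeof v1);
    }

  for (int i = 0; i < 8; i++)
    printf ("%u ", b[i]);
  printf ("\n");
  return 0;
}
#endif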
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, orig_code;
  machine_mode vec_mode;
  bool target_support_p;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  poly_uint64 nunits_in;
  poly_uint64 nunits_out;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  orig_code = code = gimple_assign_rhs_code (stmt);

  /* For pointer addition and subtraction, we should use the normal
     plus and minus for the vector operation.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;
  if (code == POINTER_DIFF_EXPR)
    code = MINUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "num. args = %d (not unary/binary/ternary op).\n",
			 op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision arithmetic not supported.\n");
      return false;
    }
  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
      if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0)))
	{
	  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest)))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not supported operation on bool value.\n");
	      return false;
	    }
	  vectype = vectype_out;
	}
      else
	vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (op0));
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (maybe_ne (nunits_out, nunits_in))
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
	  || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, NULL, NULL);
      return true;
    }
  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
     vectors with unsigned elements, but the result is signed.  So, we
     need to compute the MINUS_EXPR into vectype temporary and
     VIEW_CONVERT_EXPR it into the final vectype_out result.  */
  tree vec_cvt_dest = NULL_TREE;
  if (orig_code == POINTER_DIFF_EXPR)
    vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

       before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

       step 1: vectorize stmt S1 (done in vectorizable_load. See more details
               there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

       step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node);
	  if (op_type == ternary_op)
	    vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
			       slp_node);
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (vec_cvt_dest)
	    {
	      new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
	      new_stmt = gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
					      new_temp);
	      new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment.  */

static void
ensure_base_align (struct data_reference *dr)
{
  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      unsigned int align_base_to = DR_TARGET_ALIGNMENT (dr) * BITS_PER_UNIT;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (align_base_to);
      else
	{
	  SET_DECL_ALIGN (base_decl, align_base_to);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
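/* Hedged note on the helper above: for a base object that lives in the
   symbol table the new alignment is requested through increase_alignment,
   so the effect is roughly that of adding __attribute__ ((aligned (N))) to
   the declaration; for a local DECL the alignment is forced directly and
   marked user-specified so later passes do not shrink it again.  N here
   stands for the DR_TARGET_ALIGNMENT-derived value, not a literal from
   this file.  */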
/* Function get_group_alias_ptr_type.

   Return the alias type for the group starting at FIRST_STMT.  */

static tree
get_group_alias_ptr_type (gimple *first_stmt)
{
  struct data_reference *first_dr, *next_dr;
  gimple *next_stmt;

  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
  while (next_stmt)
    {
      next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
      if (get_alias_set (DR_REF (first_dr))
	  != get_alias_set (DR_REF (next_dr)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "conflicting alias set types.\n");
	  return ptr_type_node;
	}
      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
    }
  return reference_alias_ptr_type (DR_REF (first_dr));
}
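/* Hedged example for the helper above (the types are hypothetical): if a
   group interleaves stores made through `int *' and `float *' references
   into the same buffer, the two DR_REFs have different alias sets, so the
   function returns ptr_type_node and the vectorized accesses use the
   alias-everything pointer type instead of a more precise one.  */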
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		    slp_tree slp_node)
{
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  enum dr_alignment_support alignment_support_scheme;
  enum vect_def_type rhs_dt = vect_unknown_def_type;
  enum vect_def_type mask_dt = vect_unknown_def_type;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  gimple *next_stmt, *first_stmt;
  unsigned int group_size, i;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gather_scatter_info gs_info;
  vec_load_store_type vls_type;
6094 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6097 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
6101 /* Is vectorizable store? */
6103 tree mask
= NULL_TREE
, mask_vectype
= NULL_TREE
;
6104 if (is_gimple_assign (stmt
))
6106 tree scalar_dest
= gimple_assign_lhs (stmt
);
6107 if (TREE_CODE (scalar_dest
) == VIEW_CONVERT_EXPR
6108 && is_pattern_stmt_p (stmt_info
))
6109 scalar_dest
= TREE_OPERAND (scalar_dest
, 0);
6110 if (TREE_CODE (scalar_dest
) != ARRAY_REF
6111 && TREE_CODE (scalar_dest
) != BIT_FIELD_REF
6112 && TREE_CODE (scalar_dest
) != INDIRECT_REF
6113 && TREE_CODE (scalar_dest
) != COMPONENT_REF
6114 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
6115 && TREE_CODE (scalar_dest
) != REALPART_EXPR
6116 && TREE_CODE (scalar_dest
) != MEM_REF
)
6121 gcall
*call
= dyn_cast
<gcall
*> (stmt
);
6122 if (!call
|| !gimple_call_internal_p (call
))
6125 internal_fn ifn
= gimple_call_internal_fn (call
);
6126 if (!internal_store_fn_p (ifn
))
6129 if (slp_node
!= NULL
)
6131 if (dump_enabled_p ())
6132 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6133 "SLP of masked stores not supported.\n");
6137 int mask_index
= internal_fn_mask_index (ifn
);
6138 if (mask_index
>= 0)
6140 mask
= gimple_call_arg (call
, mask_index
);
6141 if (!vect_check_load_store_mask (stmt
, mask
, &mask_dt
,
6147 op
= vect_get_store_rhs (stmt
);
6149 /* Cannot have hybrid store SLP -- that would mean storing to the
6150 same location twice. */
6151 gcc_assert (slp
== PURE_SLP_STMT (stmt_info
));
6153 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
), rhs_vectype
= NULL_TREE
;
6154 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6158 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6159 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }
6183 if (!vect_check_store_rhs (stmt
, op
, &rhs_dt
, &rhs_vectype
, &vls_type
))
6186 elem_type
= TREE_TYPE (vectype
);
6187 vec_mode
= TYPE_MODE (vectype
);
6189 if (!STMT_VINFO_DATA_REF (stmt_info
))
6192 vect_memory_access_type memory_access_type
;
6193 if (!get_load_store_type (stmt
, vectype
, slp
, mask
, vls_type
, ncopies
,
6194 &memory_access_type
, &gs_info
))
6199 if (memory_access_type
== VMAT_CONTIGUOUS
)
6201 if (!VECTOR_MODE_P (vec_mode
)
6202 || !can_vec_mask_load_store_p (vec_mode
,
6203 TYPE_MODE (mask_vectype
), false))
6206 else if (memory_access_type
!= VMAT_LOAD_STORE_LANES
6207 && (memory_access_type
!= VMAT_GATHER_SCATTER
|| gs_info
.decl
))
6209 if (dump_enabled_p ())
6210 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6211 "unsupported access type for masked store.\n");
6217 /* FORNOW. In some cases can vectorize even if data-type not supported
6218 (e.g. - array initialization with 0). */
6219 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
6223 grouped_store
= (STMT_VINFO_GROUPED_ACCESS (stmt_info
)
6224 && memory_access_type
!= VMAT_GATHER_SCATTER
6225 && (slp
|| memory_access_type
!= VMAT_CONTIGUOUS
));
6228 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6229 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
6230 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6236 group_size
= vec_num
= 1;
6239 if (!vec_stmt
) /* transformation not required. */
6241 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
6244 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
))
6245 check_load_store_masking (loop_vinfo
, vectype
, vls_type
, group_size
,
6246 memory_access_type
, &gs_info
);
6248 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
6249 /* The SLP costs are calculated during SLP analysis. */
6251 vect_model_store_cost (stmt_info
, ncopies
, memory_access_type
,
6252 vls_type
, NULL
, NULL
, NULL
);
6255 gcc_assert (memory_access_type
== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
6259 ensure_base_align (dr
);
6261 if (memory_access_type
== VMAT_GATHER_SCATTER
&& gs_info
.decl
)
6263 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
, src
;
6264 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
6265 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
6266 tree ptr
, mask
, var
, scale
, perm_mask
= NULL_TREE
;
6267 edge pe
= loop_preheader_edge (loop
);
6270 enum { NARROW
, NONE
, WIDEN
} modifier
;
6271 poly_uint64 scatter_off_nunits
6272 = TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
6274 if (known_eq (nunits
, scatter_off_nunits
))
6276 else if (known_eq (nunits
* 2, scatter_off_nunits
))
6280 /* Currently gathers and scatters are only supported for
6281 fixed-length vectors. */
6282 unsigned int count
= scatter_off_nunits
.to_constant ();
6283 vec_perm_builder
sel (count
, count
, 1);
6284 for (i
= 0; i
< (unsigned int) count
; ++i
)
6285 sel
.quick_push (i
| (count
/ 2));
6287 vec_perm_indices
indices (sel
, 1, count
);
6288 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
,
6290 gcc_assert (perm_mask
!= NULL_TREE
);
6292 else if (known_eq (nunits
, scatter_off_nunits
* 2))
6296 /* Currently gathers and scatters are only supported for
6297 fixed-length vectors. */
6298 unsigned int count
= nunits
.to_constant ();
6299 vec_perm_builder
sel (count
, count
, 1);
6300 for (i
= 0; i
< (unsigned int) count
; ++i
)
6301 sel
.quick_push (i
| (count
/ 2));
6303 vec_perm_indices
indices (sel
, 2, count
);
6304 perm_mask
= vect_gen_perm_mask_checked (vectype
, indices
);
6305 gcc_assert (perm_mask
!= NULL_TREE
);
6311 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
6312 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6313 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6314 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6315 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6316 scaletype
= TREE_VALUE (arglist
);
6318 gcc_checking_assert (TREE_CODE (masktype
) == INTEGER_TYPE
6319 && TREE_CODE (rettype
) == VOID_TYPE
);
6321 ptr
= fold_convert (ptrtype
, gs_info
.base
);
6322 if (!is_gimple_min_invariant (ptr
))
6324 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
6325 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
6326 gcc_assert (!new_bb
);
6329 /* Currently we support only unconditional scatter stores,
6330 so mask should be all ones. */
6331 mask
= build_int_cst (masktype
, -1);
6332 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6334 scale
= build_int_cst (scaletype
, gs_info
.scale
);
6336 prev_stmt_info
= NULL
;
6337 for (j
= 0; j
< ncopies
; ++j
)
6342 = vect_get_vec_def_for_operand (op
, stmt
);
6344 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
6346 else if (modifier
!= NONE
&& (j
& 1))
6348 if (modifier
== WIDEN
)
6351 = vect_get_vec_def_for_stmt_copy (rhs_dt
, vec_oprnd1
);
6352 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
, perm_mask
,
6355 else if (modifier
== NARROW
)
6357 src
= permute_vec_elements (vec_oprnd1
, vec_oprnd1
, perm_mask
,
6360 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
6369 = vect_get_vec_def_for_stmt_copy (rhs_dt
, vec_oprnd1
);
6371 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
6375 if (!useless_type_conversion_p (srctype
, TREE_TYPE (src
)))
6377 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src
)),
6378 TYPE_VECTOR_SUBPARTS (srctype
)));
6379 var
= vect_get_new_ssa_name (srctype
, vect_simple_var
);
6380 src
= build1 (VIEW_CONVERT_EXPR
, srctype
, src
);
6381 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, src
);
6382 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6386 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
6388 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
)),
6389 TYPE_VECTOR_SUBPARTS (idxtype
)));
6390 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
6391 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
6392 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6393 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6398 = gimple_build_call (gs_info
.decl
, 5, ptr
, mask
, op
, src
, scale
);
6400 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6402 if (prev_stmt_info
== NULL
)
6403 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6405 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6406 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6411 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6413 gimple
*group_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6414 GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt
))++;
6420 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
6422 /* We vectorize all the stmts of the interleaving group when we
6423 reach the last stmt in the group. */
6424 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
6425 < GROUP_SIZE (vinfo_for_stmt (first_stmt
))
6434 grouped_store
= false;
6435 /* VEC_NUM is the number of vect stmts to be created for this
6437 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6438 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
6439 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt
)) == first_stmt
);
6440 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
6441 op
= vect_get_store_rhs (first_stmt
);
6444 /* VEC_NUM is the number of vect stmts to be created for this
6446 vec_num
= group_size
;
6448 ref_type
= get_group_alias_ptr_type (first_stmt
);
6451 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
6453 if (dump_enabled_p ())
6454 dump_printf_loc (MSG_NOTE
, vect_location
,
6455 "transform store. ncopies = %d\n", ncopies
);
6457 if (memory_access_type
== VMAT_ELEMENTWISE
6458 || memory_access_type
== VMAT_STRIDED_SLP
)
6460 gimple_stmt_iterator incr_gsi
;
6466 tree stride_base
, stride_step
, alias_off
;
6469 /* Checked by get_load_store_type. */
6470 unsigned int const_nunits
= nunits
.to_constant ();
6472 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
));
6473 gcc_assert (!nested_in_vect_loop_p (loop
, stmt
));
6476 = fold_build_pointer_plus
6477 (DR_BASE_ADDRESS (first_dr
),
6478 size_binop (PLUS_EXPR
,
6479 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
6480 convert_to_ptrofftype (DR_INIT (first_dr
))));
6481 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
       */
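      /* Hedged illustration (the concrete values are hypothetical, not taken
	 from the code below): with a V4SI vectype and a scalar stride of 3,
	 each iteration of the transformed loop materializes one rhs vector
	 and stores its four lanes to array[j], array[j + 3], array[j + 6]
	 and array[j + 9], after which the new induction variable advances
	 by VF*3 = 12 elements.  */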
6501 unsigned nstores
= const_nunits
;
6503 tree ltype
= elem_type
;
6504 tree lvectype
= vectype
;
6507 if (group_size
< const_nunits
6508 && const_nunits
% group_size
== 0)
6510 nstores
= const_nunits
/ group_size
;
6512 ltype
= build_vector_type (elem_type
, group_size
);
6515 /* First check if vec_extract optab doesn't support extraction
6516 of vector elts directly. */
6517 scalar_mode elmode
= SCALAR_TYPE_MODE (elem_type
);
6519 if (!mode_for_vector (elmode
, group_size
).exists (&vmode
)
6520 || !VECTOR_MODE_P (vmode
)
6521 || !targetm
.vector_mode_supported_p (vmode
)
6522 || (convert_optab_handler (vec_extract_optab
,
6523 TYPE_MODE (vectype
), vmode
)
6524 == CODE_FOR_nothing
))
6526 /* Try to avoid emitting an extract of vector elements
6527 by performing the extracts using an integer type of the
6528 same size, extracting from a vector of those and then
6529 re-interpreting it as the original vector type if
6532 = group_size
* GET_MODE_BITSIZE (elmode
);
6533 elmode
= int_mode_for_size (lsize
, 0).require ();
6534 unsigned int lnunits
= const_nunits
/ group_size
;
6535 /* If we can't construct such a vector fall back to
6536 element extracts from the original vector type and
6537 element size stores. */
6538 if (mode_for_vector (elmode
, lnunits
).exists (&vmode
)
6539 && VECTOR_MODE_P (vmode
)
6540 && targetm
.vector_mode_supported_p (vmode
)
6541 && (convert_optab_handler (vec_extract_optab
,
6543 != CODE_FOR_nothing
))
6547 ltype
= build_nonstandard_integer_type (lsize
, 1);
6548 lvectype
= build_vector_type (ltype
, nstores
);
6550 /* Else fall back to vector extraction anyway.
6551 Fewer stores are more important than avoiding spilling
6552 of the vector we extract from. Compared to the
6553 construction case in vectorizable_load no store-forwarding
6554 issue exists here for reasonable archs. */
6557 else if (group_size
>= const_nunits
6558 && group_size
% const_nunits
== 0)
6561 lnel
= const_nunits
;
6565 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (elem_type
));
6566 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6569 ivstep
= stride_step
;
6570 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
6571 build_int_cst (TREE_TYPE (ivstep
), vf
));
6573 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6575 stride_base
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_base
);
6576 ivstep
= cse_and_gimplify_to_preheader (loop_vinfo
, ivstep
);
6577 create_iv (stride_base
, ivstep
, NULL
,
6578 loop
, &incr_gsi
, insert_after
,
6580 incr
= gsi_stmt (incr_gsi
);
6581 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
6583 stride_step
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_step
);
6585 prev_stmt_info
= NULL
;
6586 alias_off
= build_int_cst (ref_type
, 0);
6587 next_stmt
= first_stmt
;
6588 for (g
= 0; g
< group_size
; g
++)
6590 running_off
= offvar
;
6593 tree size
= TYPE_SIZE_UNIT (ltype
);
6594 tree pos
= fold_build2 (MULT_EXPR
, sizetype
, size_int (g
),
6596 tree newoff
= copy_ssa_name (running_off
, NULL
);
6597 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6599 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6600 running_off
= newoff
;
6602 unsigned int group_el
= 0;
6603 unsigned HOST_WIDE_INT
6604 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
6605 for (j
= 0; j
< ncopies
; j
++)
6607 /* We've set op and dt above, from vect_get_store_rhs,
6608 and first_stmt == stmt. */
6613 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
, NULL
,
6615 vec_oprnd
= vec_oprnds
[0];
6619 op
= vect_get_store_rhs (next_stmt
);
6620 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6626 vec_oprnd
= vec_oprnds
[j
];
6629 vect_is_simple_use (op
, vinfo
, &def_stmt
, &rhs_dt
);
6630 vec_oprnd
= vect_get_vec_def_for_stmt_copy (rhs_dt
,
6634 /* Pun the vector to extract from if necessary. */
6635 if (lvectype
!= vectype
)
6637 tree tem
= make_ssa_name (lvectype
);
6639 = gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
6640 lvectype
, vec_oprnd
));
6641 vect_finish_stmt_generation (stmt
, pun
, gsi
);
6644 for (i
= 0; i
< nstores
; i
++)
6646 tree newref
, newoff
;
6647 gimple
*incr
, *assign
;
6648 tree size
= TYPE_SIZE (ltype
);
6649 /* Extract the i'th component. */
6650 tree pos
= fold_build2 (MULT_EXPR
, bitsizetype
,
6651 bitsize_int (i
), size
);
6652 tree elem
= fold_build3 (BIT_FIELD_REF
, ltype
, vec_oprnd
,
6655 elem
= force_gimple_operand_gsi (gsi
, elem
, true,
6659 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
6661 newref
= build2 (MEM_REF
, ltype
,
6662 running_off
, this_off
);
6664 /* And store it to *running_off. */
6665 assign
= gimple_build_assign (newref
, elem
);
6666 vect_finish_stmt_generation (stmt
, assign
, gsi
);
6670 || group_el
== group_size
)
6672 newoff
= copy_ssa_name (running_off
, NULL
);
6673 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6674 running_off
, stride_step
);
6675 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6677 running_off
= newoff
;
6680 if (g
== group_size
- 1
6683 if (j
== 0 && i
== 0)
6684 STMT_VINFO_VEC_STMT (stmt_info
)
6685 = *vec_stmt
= assign
;
6687 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = assign
;
6688 prev_stmt_info
= vinfo_for_stmt (assign
);
6692 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6697 vec_oprnds
.release ();
6701 auto_vec
<tree
> dr_chain (group_size
);
6702 oprnds
.create (group_size
);
6704 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
6705 gcc_assert (alignment_support_scheme
);
6706 vec_loop_masks
*loop_masks
6707 = (loop_vinfo
&& LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
)
6708 ? &LOOP_VINFO_MASKS (loop_vinfo
)
6710 /* Targets with store-lane instructions must not require explicit
6711 realignment. vect_supportable_dr_alignment always returns either
6712 dr_aligned or dr_unaligned_supported for masked operations. */
6713 gcc_assert ((memory_access_type
!= VMAT_LOAD_STORE_LANES
6716 || alignment_support_scheme
== dr_aligned
6717 || alignment_support_scheme
== dr_unaligned_supported
);
6719 if (memory_access_type
== VMAT_CONTIGUOUS_DOWN
6720 || memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
6721 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
6724 tree vec_offset
= NULL_TREE
;
6725 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6727 aggr_type
= NULL_TREE
;
6730 else if (memory_access_type
== VMAT_GATHER_SCATTER
)
6732 aggr_type
= elem_type
;
6733 vect_get_strided_load_store_ops (stmt
, loop_vinfo
, &gs_info
,
6734 &bump
, &vec_offset
);
6738 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6739 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
6741 aggr_type
= vectype
;
6742 bump
= vect_get_data_ptr_increment (dr
, aggr_type
, memory_access_type
);
6746 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo
) = true;
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
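  /* Hedged illustration (hypothetical 8-element vectors): for two inputs
     vx0 = {a0..a7} and vx3 = {b0..b7}, the first mask above selects
     {a0,b0,a1,b1,a2,b2,a3,b3} and the second {a4,b4,a5,b5,a6,b6,a7,b7},
     i.e. the interleave-low/interleave-high pair that
     vect_permute_store_chain emits for a group of two stores.  */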
6787 prev_stmt_info
= NULL
;
6788 tree vec_mask
= NULL_TREE
;
6789 for (j
= 0; j
< ncopies
; j
++)
6796 /* Get vectorized arguments for SLP_NODE. */
6797 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
,
6800 vec_oprnd
= vec_oprnds
[0];
	  /* For interleaved stores we collect vectorized defs for all the
	     stores in the group in DR_CHAIN and OPRNDS.  DR_CHAIN is then
	     used as an input to vect_permute_store_chain(), and OPRNDS as
	     an input to vect_get_vec_def_for_stmt_copy() for the next copy.

	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
6811 next_stmt
= first_stmt
;
6812 for (i
= 0; i
< group_size
; i
++)
	      /* Since gaps are not supported for interleaved stores,
		 GROUP_SIZE is the exact number of stmts in the chain.
		 Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		 there is no interleaving, GROUP_SIZE is 1, and only one
		 iteration of the loop will be executed.  */
6819 op
= vect_get_store_rhs (next_stmt
);
6820 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6821 dr_chain
.quick_push (vec_oprnd
);
6822 oprnds
.quick_push (vec_oprnd
);
6823 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6826 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
,
	  /* We should have caught mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
6833 bool simd_lane_access_p
6834 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
6835 if (simd_lane_access_p
6836 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
6837 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
6838 && integer_zerop (DR_OFFSET (first_dr
))
6839 && integer_zerop (DR_INIT (first_dr
))
6840 && alias_sets_conflict_p (get_alias_set (aggr_type
),
6841 get_alias_set (TREE_TYPE (ref_type
))))
6843 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
6844 dataref_offset
= build_int_cst (ref_type
, 0);
6847 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6849 vect_get_gather_scatter_ops (loop
, stmt
, &gs_info
,
6850 &dataref_ptr
, &vec_offset
);
6855 = vect_create_data_ref_ptr (first_stmt
, aggr_type
,
6856 simd_lane_access_p
? loop
: NULL
,
6857 offset
, &dummy
, gsi
, &ptr_incr
,
6858 simd_lane_access_p
, &inv_p
,
6860 gcc_assert (bb_vinfo
|| !inv_p
);
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for
	     the next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
6871 for (i
= 0; i
< group_size
; i
++)
6874 vect_is_simple_use (op
, vinfo
, &def_stmt
, &rhs_dt
);
6875 vec_oprnd
= vect_get_vec_def_for_stmt_copy (rhs_dt
, op
);
6876 dr_chain
[i
] = vec_oprnd
;
6877 oprnds
[i
] = vec_oprnd
;
6880 vec_mask
= vect_get_vec_def_for_stmt_copy (mask_dt
, vec_mask
);
6883 = int_const_binop (PLUS_EXPR
, dataref_offset
, bump
);
6884 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6885 vec_offset
= vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
6888 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
6892 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6896 /* Combine all the vectors into an array. */
6897 vec_array
= create_vector_array (vectype
, vec_num
);
6898 for (i
= 0; i
< vec_num
; i
++)
6900 vec_oprnd
= dr_chain
[i
];
6901 write_vector_array (stmt
, gsi
, vec_oprnd
, vec_array
, i
);
6904 tree final_mask
= NULL
;
6906 final_mask
= vect_get_loop_mask (gsi
, loop_masks
, ncopies
,
6909 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
6916 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
6918 unsigned int align
= TYPE_ALIGN_UNIT (TREE_TYPE (vectype
));
6919 tree alias_ptr
= build_int_cst (ref_type
, align
);
6920 call
= gimple_build_call_internal (IFN_MASK_STORE_LANES
, 4,
6921 dataref_ptr
, alias_ptr
,
6922 final_mask
, vec_array
);
6927 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6928 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
6929 call
= gimple_build_call_internal (IFN_STORE_LANES
, 1,
6931 gimple_call_set_lhs (call
, data_ref
);
6933 gimple_call_set_nothrow (call
, true);
6935 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6943 result_chain
.create (group_size
);
6945 vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
6949 next_stmt
= first_stmt
;
6950 for (i
= 0; i
< vec_num
; i
++)
6952 unsigned align
, misalign
;
6954 tree final_mask
= NULL_TREE
;
6956 final_mask
= vect_get_loop_mask (gsi
, loop_masks
,
6958 vectype
, vec_num
* j
+ i
);
6960 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
6963 if (memory_access_type
== VMAT_GATHER_SCATTER
)
6965 tree scale
= size_int (gs_info
.scale
);
6968 call
= gimple_build_call_internal
6969 (IFN_MASK_SCATTER_STORE
, 5, dataref_ptr
, vec_offset
,
6970 scale
, vec_oprnd
, final_mask
);
6972 call
= gimple_build_call_internal
6973 (IFN_SCATTER_STORE
, 4, dataref_ptr
, vec_offset
,
6975 gimple_call_set_nothrow (call
, true);
6977 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6982 /* Bump the vector pointer. */
6983 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
6987 vec_oprnd
= vec_oprnds
[i
];
6988 else if (grouped_store
)
6989 /* For grouped stores vectorized defs are interleaved in
6990 vect_permute_store_chain(). */
6991 vec_oprnd
= result_chain
[i
];
6993 align
= DR_TARGET_ALIGNMENT (first_dr
);
6994 if (aligned_access_p (first_dr
))
6996 else if (DR_MISALIGNMENT (first_dr
) == -1)
6998 align
= dr_alignment (vect_dr_behavior (first_dr
));
7002 misalign
= DR_MISALIGNMENT (first_dr
);
7003 if (dataref_offset
== NULL_TREE
7004 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
7005 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
7008 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7010 tree perm_mask
= perm_mask_for_reverse (vectype
);
7012 = vect_create_destination_var (vect_get_store_rhs (stmt
),
7014 tree new_temp
= make_ssa_name (perm_dest
);
7016 /* Generate the permute statement. */
7018 = gimple_build_assign (new_temp
, VEC_PERM_EXPR
, vec_oprnd
,
7019 vec_oprnd
, perm_mask
);
7020 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
7022 perm_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7023 vec_oprnd
= new_temp
;
7026 /* Arguments are ready. Create the new vector stmt. */
7029 align
= least_bit_hwi (misalign
| align
);
7030 tree ptr
= build_int_cst (ref_type
, align
);
7032 = gimple_build_call_internal (IFN_MASK_STORE
, 4,
7034 final_mask
, vec_oprnd
);
7035 gimple_call_set_nothrow (call
, true);
7040 data_ref
= fold_build2 (MEM_REF
, vectype
,
7044 : build_int_cst (ref_type
, 0));
7045 if (aligned_access_p (first_dr
))
7047 else if (DR_MISALIGNMENT (first_dr
) == -1)
7048 TREE_TYPE (data_ref
)
7049 = build_aligned_type (TREE_TYPE (data_ref
),
7050 align
* BITS_PER_UNIT
);
7052 TREE_TYPE (data_ref
)
7053 = build_aligned_type (TREE_TYPE (data_ref
),
7054 TYPE_ALIGN (elem_type
));
7055 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
7057 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7062 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
7070 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7072 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7073 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7078 result_chain
.release ();
7079 vec_oprnds
.release ();
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_const_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
{
  tree mask_type;

  poly_uint64 nunits = sel.length ();
  gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));

  mask_type = build_vector_type (ssizetype, nunits);
  return vec_perm_indices_to_tree (mask_type, sel);
}

/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_const_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
{
  gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
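/* Minimal usage sketch (hedged; COUNT and the reversal pattern are
   hypothetical and only mirror what perm_mask_for_reverse computes):

     unsigned int count = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
     vec_perm_builder sel (count, count, 1);
     for (unsigned int k = 0; k < count; ++k)
       sel.quick_push (count - 1 - k);
     vec_perm_indices indices (sel, 1, count);
     tree mask = NULL_TREE;
     if (can_vec_perm_const_p (TYPE_MODE (vectype), indices))
       mask = vect_gen_perm_mask_checked (vectype, indices);
 */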
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  tree scalar_dest = gimple_get_lhs (stmt);
  if (TREE_CODE (scalar_dest) == SSA_NAME)
    perm_dest = vect_create_destination_var (scalar_dest, vectype);
  else
    perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can then be moved),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node, slp_instance slp_node_instance)
{
7206 tree vec_dest
= NULL
;
7207 tree data_ref
= NULL
;
7208 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7209 stmt_vec_info prev_stmt_info
;
7210 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
7211 struct loop
*loop
= NULL
;
7212 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
7213 bool nested_in_vect_loop
= false;
7214 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
7218 gimple
*new_stmt
= NULL
;
7220 enum dr_alignment_support alignment_support_scheme
;
7221 tree dataref_ptr
= NULL_TREE
;
7222 tree dataref_offset
= NULL_TREE
;
7223 gimple
*ptr_incr
= NULL
;
7226 unsigned int group_size
;
7227 poly_uint64 group_gap_adj
;
7228 tree msq
= NULL_TREE
, lsq
;
7229 tree offset
= NULL_TREE
;
7230 tree byte_offset
= NULL_TREE
;
7231 tree realignment_token
= NULL_TREE
;
7233 vec
<tree
> dr_chain
= vNULL
;
7234 bool grouped_load
= false;
7236 gimple
*first_stmt_for_drptr
= NULL
;
7238 bool compute_in_loop
= false;
7239 struct loop
*at_loop
;
7241 bool slp
= (slp_node
!= NULL
);
7242 bool slp_perm
= false;
7243 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
7246 gather_scatter_info gs_info
;
7247 vec_info
*vinfo
= stmt_info
->vinfo
;
7249 enum vect_def_type mask_dt
= vect_unknown_def_type
;
7251 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
7254 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
7258 tree mask
= NULL_TREE
, mask_vectype
= NULL_TREE
;
7259 if (is_gimple_assign (stmt
))
7261 scalar_dest
= gimple_assign_lhs (stmt
);
7262 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
7265 tree_code code
= gimple_assign_rhs_code (stmt
);
7266 if (code
!= ARRAY_REF
7267 && code
!= BIT_FIELD_REF
7268 && code
!= INDIRECT_REF
7269 && code
!= COMPONENT_REF
7270 && code
!= IMAGPART_EXPR
7271 && code
!= REALPART_EXPR
7273 && TREE_CODE_CLASS (code
) != tcc_declaration
)
7278 gcall
*call
= dyn_cast
<gcall
*> (stmt
);
7279 if (!call
|| !gimple_call_internal_p (call
))
7282 internal_fn ifn
= gimple_call_internal_fn (call
);
7283 if (!internal_load_fn_p (ifn
))
7286 scalar_dest
= gimple_call_lhs (call
);
7290 if (slp_node
!= NULL
)
7292 if (dump_enabled_p ())
7293 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7294 "SLP of masked loads not supported.\n");
7298 int mask_index
= internal_fn_mask_index (ifn
);
7299 if (mask_index
>= 0)
7301 mask
= gimple_call_arg (call
, mask_index
);
7302 if (!vect_check_load_store_mask (stmt
, mask
, &mask_dt
,
7308 if (!STMT_VINFO_DATA_REF (stmt_info
))
7311 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
7312 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
7316 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
7317 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
7318 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
7323 /* Multiple types in SLP are handled by creating the appropriate number of
7324 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
7329 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
7331 gcc_assert (ncopies
>= 1);
7333 /* FORNOW. This restriction should be relaxed. */
7334 if (nested_in_vect_loop
&& ncopies
> 1)
7336 if (dump_enabled_p ())
7337 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7338 "multiple types in nested loop.\n");
7342 /* Invalidate assumptions made by dependence analysis when vectorization
7343 on the unrolled body effectively re-orders stmts. */
7345 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
7346 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo
),
7347 STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
7349 if (dump_enabled_p ())
7350 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7351 "cannot perform implicit CSE when unrolling "
7352 "with negative dependence distance\n");
7356 elem_type
= TREE_TYPE (vectype
);
7357 mode
= TYPE_MODE (vectype
);
7359 /* FORNOW. In some cases can vectorize even if data-type not supported
7360 (e.g. - data copies). */
7361 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
7363 if (dump_enabled_p ())
7364 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7365 "Aligned load, but unsupported type.\n");
7369 /* Check if the load is a part of an interleaving chain. */
7370 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
7372 grouped_load
= true;
7374 gcc_assert (!nested_in_vect_loop
);
7375 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
7377 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7378 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7380 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
7383 /* Invalidate assumptions made by dependence analysis when vectorization
7384 on the unrolled body effectively re-orders stmts. */
7385 if (!PURE_SLP_STMT (stmt_info
)
7386 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
7387 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo
),
7388 STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
7390 if (dump_enabled_p ())
7391 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7392 "cannot perform implicit CSE when performing "
7393 "group loads with negative dependence distance\n");
7397 /* Similarly when the stmt is a load that is both part of a SLP
7398 instance and a loop vectorized stmt via the same-dr mechanism
7399 we have to give up. */
7400 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
7401 && (STMT_SLP_TYPE (stmt_info
)
7402 != STMT_SLP_TYPE (vinfo_for_stmt
7403 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
7405 if (dump_enabled_p ())
7406 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7407 "conflicting SLP types for CSEd load\n");
7414 vect_memory_access_type memory_access_type
;
7415 if (!get_load_store_type (stmt
, vectype
, slp
, mask
, VLS_LOAD
, ncopies
,
7416 &memory_access_type
, &gs_info
))
7421 if (memory_access_type
== VMAT_CONTIGUOUS
)
7423 machine_mode vec_mode
= TYPE_MODE (vectype
);
7424 if (!VECTOR_MODE_P (vec_mode
)
7425 || !can_vec_mask_load_store_p (vec_mode
,
7426 TYPE_MODE (mask_vectype
), true))
7429 else if (memory_access_type
== VMAT_GATHER_SCATTER
&& gs_info
.decl
)
7431 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
7433 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
7434 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
7436 if (dump_enabled_p ())
7437 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7438 "masked gather with integer mask not"
7443 else if (memory_access_type
!= VMAT_LOAD_STORE_LANES
7444 && memory_access_type
!= VMAT_GATHER_SCATTER
)
7446 if (dump_enabled_p ())
7447 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7448 "unsupported access type for masked load.\n");
7453 if (!vec_stmt
) /* transformation not required. */
7456 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
7459 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
))
7460 check_load_store_masking (loop_vinfo
, vectype
, VLS_LOAD
, group_size
,
7461 memory_access_type
, &gs_info
);
7463 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
7464 /* The SLP costs are calculated during SLP analysis. */
7466 vect_model_load_cost (stmt_info
, ncopies
, memory_access_type
,
7472 gcc_assert (memory_access_type
7473 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
7475 if (dump_enabled_p ())
7476 dump_printf_loc (MSG_NOTE
, vect_location
,
7477 "transform load. ncopies = %d\n", ncopies
);
7481 ensure_base_align (dr
);
7483 if (memory_access_type
== VMAT_GATHER_SCATTER
&& gs_info
.decl
)
7485 vect_build_gather_load_calls (stmt
, gsi
, vec_stmt
, &gs_info
, mask
,
7490 if (memory_access_type
== VMAT_ELEMENTWISE
7491 || memory_access_type
== VMAT_STRIDED_SLP
)
7493 gimple_stmt_iterator incr_gsi
;
7499 vec
<constructor_elt
, va_gc
> *v
= NULL
;
7500 tree stride_base
, stride_step
, alias_off
;
7501 /* Checked by get_load_store_type. */
7502 unsigned int const_nunits
= nunits
.to_constant ();
7503 unsigned HOST_WIDE_INT cst_offset
= 0;
7505 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
));
7506 gcc_assert (!nested_in_vect_loop
);
7510 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7511 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7518 if (slp
&& grouped_load
)
7520 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7521 ref_type
= get_group_alias_ptr_type (first_stmt
);
7527 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)))
7528 * vect_get_place_in_interleaving_chain (stmt
, first_stmt
));
7530 ref_type
= reference_alias_ptr_type (DR_REF (dr
));
7534 = fold_build_pointer_plus
7535 (DR_BASE_ADDRESS (first_dr
),
7536 size_binop (PLUS_EXPR
,
7537 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
7538 convert_to_ptrofftype (DR_INIT (first_dr
))));
7539 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
	     ...
       */
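      /* Hedged illustration (hypothetical values): with a V4SI vectype and
	 a scalar stride of 3, each iteration of the transformed loop issues
	 four scalar loads from array[j], array[j + 3], array[j + 6] and
	 array[j + 9] and assembles them into one V4SI CONSTRUCTOR (or into
	 fewer, wider integer loads when the fallback below applies).  */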
7557 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (stride_step
), stride_step
,
7558 build_int_cst (TREE_TYPE (stride_step
), vf
));
7560 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
7562 stride_base
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_base
);
7563 ivstep
= cse_and_gimplify_to_preheader (loop_vinfo
, ivstep
);
7564 create_iv (stride_base
, ivstep
, NULL
,
7565 loop
, &incr_gsi
, insert_after
,
7567 incr
= gsi_stmt (incr_gsi
);
7568 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
7570 stride_step
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_step
);
7572 prev_stmt_info
= NULL
;
7573 running_off
= offvar
;
7574 alias_off
= build_int_cst (ref_type
, 0);
7575 int nloads
= const_nunits
;
7577 tree ltype
= TREE_TYPE (vectype
);
7578 tree lvectype
= vectype
;
7579 auto_vec
<tree
> dr_chain
;
7580 if (memory_access_type
== VMAT_STRIDED_SLP
)
7582 if (group_size
< const_nunits
)
7584 /* First check if vec_init optab supports construction from
7585 vector elts directly. */
7586 scalar_mode elmode
= SCALAR_TYPE_MODE (TREE_TYPE (vectype
));
7588 if (mode_for_vector (elmode
, group_size
).exists (&vmode
)
7589 && VECTOR_MODE_P (vmode
)
7590 && targetm
.vector_mode_supported_p (vmode
)
7591 && (convert_optab_handler (vec_init_optab
,
7592 TYPE_MODE (vectype
), vmode
)
7593 != CODE_FOR_nothing
))
7595 nloads
= const_nunits
/ group_size
;
7597 ltype
= build_vector_type (TREE_TYPE (vectype
), group_size
);
7601 /* Otherwise avoid emitting a constructor of vector elements
7602 by performing the loads using an integer type of the same
7603 size, constructing a vector of those and then
7604 re-interpreting it as the original vector type.
7605 This avoids a huge runtime penalty due to the general
7606 inability to perform store forwarding from smaller stores
7607 to a larger load. */
7609 = group_size
* TYPE_PRECISION (TREE_TYPE (vectype
));
7610 elmode
= int_mode_for_size (lsize
, 0).require ();
7611 unsigned int lnunits
= const_nunits
/ group_size
;
7612 /* If we can't construct such a vector fall back to
7613 element loads of the original vector type. */
7614 if (mode_for_vector (elmode
, lnunits
).exists (&vmode
)
7615 && VECTOR_MODE_P (vmode
)
7616 && targetm
.vector_mode_supported_p (vmode
)
7617 && (convert_optab_handler (vec_init_optab
, vmode
, elmode
)
7618 != CODE_FOR_nothing
))
7622 ltype
= build_nonstandard_integer_type (lsize
, 1);
7623 lvectype
= build_vector_type (ltype
, nloads
);
7630 lnel
= const_nunits
;
7633 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (TREE_TYPE (vectype
)));
7637 /* For SLP permutation support we need to load the whole group,
7638 not only the number of vector stmts the permutation result
7642 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7644 unsigned int const_vf
= vf
.to_constant ();
7645 ncopies
= CEIL (group_size
* const_vf
, const_nunits
);
7646 dr_chain
.create (ncopies
);
7649 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7651 unsigned int group_el
= 0;
7652 unsigned HOST_WIDE_INT
7653 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
7654 for (j
= 0; j
< ncopies
; j
++)
7657 vec_alloc (v
, nloads
);
7658 for (i
= 0; i
< nloads
; i
++)
7660 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
7661 group_el
* elsz
+ cst_offset
);
7662 new_stmt
= gimple_build_assign (make_ssa_name (ltype
),
7663 build2 (MEM_REF
, ltype
,
7664 running_off
, this_off
));
7665 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7667 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
,
7668 gimple_assign_lhs (new_stmt
));
7672 || group_el
== group_size
)
7674 tree newoff
= copy_ssa_name (running_off
);
7675 gimple
*incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
7676 running_off
, stride_step
);
7677 vect_finish_stmt_generation (stmt
, incr
, gsi
);
7679 running_off
= newoff
;
7685 tree vec_inv
= build_constructor (lvectype
, v
);
7686 new_temp
= vect_init_vector (stmt
, vec_inv
, lvectype
, gsi
);
7687 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7688 if (lvectype
!= vectype
)
7690 new_stmt
= gimple_build_assign (make_ssa_name (vectype
),
7692 build1 (VIEW_CONVERT_EXPR
,
7693 vectype
, new_temp
));
7694 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7701 dr_chain
.quick_push (gimple_assign_lhs (new_stmt
));
7703 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7708 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7710 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7711 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7717 vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7718 slp_node_instance
, false, &n_perms
);
7723 if (memory_access_type
== VMAT_GATHER_SCATTER
7724 || (!slp
&& memory_access_type
== VMAT_CONTIGUOUS
))
7725 grouped_load
= false;
7729 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7730 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7731 /* For SLP vectorization we directly vectorize a subchain
7732 without permutation. */
7733 if (slp
&& ! SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
7734 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7735 /* For BB vectorization always use the first stmt to base
7736 the data ref pointer on. */
7738 first_stmt_for_drptr
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ??? But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ??? With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */
	  && !slp)
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
7755 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7758 /* VEC_NUM is the number of vect stmts to be created for this group. */
7761 grouped_load
= false;
7762 /* For SLP permutation support we need to load the whole group,
7763 not only the number of vector stmts the permutation result
7767 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7769 unsigned int const_vf
= vf
.to_constant ();
7770 unsigned int const_nunits
= nunits
.to_constant ();
7771 vec_num
= CEIL (group_size
* const_vf
, const_nunits
);
7772 group_gap_adj
= vf
* group_size
- nunits
* vec_num
;
7776 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7778 = group_size
- SLP_INSTANCE_GROUP_SIZE (slp_node_instance
);
7782 vec_num
= group_size
;
7784 ref_type
= get_group_alias_ptr_type (first_stmt
);
7790 group_size
= vec_num
= 1;
7792 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
7795 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
7796 gcc_assert (alignment_support_scheme
);
7797 vec_loop_masks
*loop_masks
7798 = (loop_vinfo
&& LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
)
7799 ? &LOOP_VINFO_MASKS (loop_vinfo
)
7801 /* Targets with store-lane instructions must not require explicit
7802 realignment. vect_supportable_dr_alignment always returns either
7803 dr_aligned or dr_unaligned_supported for masked operations. */
7804 gcc_assert ((memory_access_type
!= VMAT_LOAD_STORE_LANES
7807 || alignment_support_scheme
== dr_aligned
7808 || alignment_support_scheme
== dr_unaligned_supported
);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
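  /* Hedged illustration (hypothetical group of two interleaved loads with
     V4SI): the memory holds a0,b0,a1,b1,a2,b2,..., so vx0 = {a0,b0,a1,b1}
     and vx1 = {a2,b2,a3,b3}.  The even mask {0,2,4,6} over the concatenated
     inputs extracts {a0,a1,a2,a3} and the odd mask {1,3,5,7} extracts
     {b0,b1,b2,b3}, giving each scalar load of the chain its own de-interleaved
     vector.  */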
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
	 p = p + indx * vectype_size;
	 vec_dest = *(p);
	 indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
	 p2 = p2 + indx * vectype_size
	 lsq = *(floor(p2))
	 vec_dest = realign_load (msq, lsq, realignment_token)
	 indx = indx + 1;
	 msq = lsq;
       }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
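  /* Hedged note on the realignment scheme sketched above: REALIGN_LOAD
     combines the vector MSQ loaded in the previous iteration from the
     rounded-down address with the newly loaded LSQ, steered by the
     target-specific REALIGNMENT_TOKEN, so each iteration issues only one
     new aligned vector load instead of an unaligned one (or two
     overlapping loads).  */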
7913 if (nested_in_vect_loop
7914 && !multiple_p (DR_STEP_ALIGNMENT (dr
),
7915 GET_MODE_SIZE (TYPE_MODE (vectype
))))
7917 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
7918 compute_in_loop
= true;
7921 if ((alignment_support_scheme
== dr_explicit_realign_optimized
7922 || alignment_support_scheme
== dr_explicit_realign
)
7923 && !compute_in_loop
)
7925 msq
= vect_setup_realignment (first_stmt
, gsi
, &realignment_token
,
7926 alignment_support_scheme
, NULL_TREE
,
7928 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7930 phi
= as_a
<gphi
*> (SSA_NAME_DEF_STMT (msq
));
7931 byte_offset
= size_binop (MINUS_EXPR
, TYPE_SIZE_UNIT (vectype
),
7938 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7939 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
7942 tree vec_offset
= NULL_TREE
;
7943 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
7945 aggr_type
= NULL_TREE
;
7948 else if (memory_access_type
== VMAT_GATHER_SCATTER
)
7950 aggr_type
= elem_type
;
7951 vect_get_strided_load_store_ops (stmt
, loop_vinfo
, &gs_info
,
7952 &bump
, &vec_offset
);
7956 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7957 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
7959 aggr_type
= vectype
;
7960 bump
= vect_get_data_ptr_increment (dr
, aggr_type
, memory_access_type
);
7963 tree vec_mask
= NULL_TREE
;
7964 prev_stmt_info
= NULL
;
7965 poly_uint64 group_elt
= 0;
7966 for (j
= 0; j
< ncopies
; j
++)
7968 /* 1. Create the vector or array pointer update chain. */
7971 bool simd_lane_access_p
7972 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
7973 if (simd_lane_access_p
7974 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
7975 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
7976 && integer_zerop (DR_OFFSET (first_dr
))
7977 && integer_zerop (DR_INIT (first_dr
))
7978 && alias_sets_conflict_p (get_alias_set (aggr_type
),
7979 get_alias_set (TREE_TYPE (ref_type
)))
7980 && (alignment_support_scheme
== dr_aligned
7981 || alignment_support_scheme
== dr_unaligned_supported
))
7983 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
7984 dataref_offset
= build_int_cst (ref_type
, 0);
7987 else if (first_stmt_for_drptr
7988 && first_stmt
!= first_stmt_for_drptr
)
7991 = vect_create_data_ref_ptr (first_stmt_for_drptr
, aggr_type
,
7992 at_loop
, offset
, &dummy
, gsi
,
7993 &ptr_incr
, simd_lane_access_p
,
7994 &inv_p
, byte_offset
, bump
);
7995 /* Adjust the pointer by the difference to first_stmt. */
7996 data_reference_p ptrdr
7997 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr
));
7998 tree diff
= fold_convert (sizetype
,
7999 size_binop (MINUS_EXPR
,
8002 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8005 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
8007 vect_get_gather_scatter_ops (loop
, stmt
, &gs_info
,
8008 &dataref_ptr
, &vec_offset
);
8013 = vect_create_data_ref_ptr (first_stmt
, aggr_type
, at_loop
,
8014 offset
, &dummy
, gsi
, &ptr_incr
,
8015 simd_lane_access_p
, &inv_p
,
8018 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
,
8024 dataref_offset
= int_const_binop (PLUS_EXPR
, dataref_offset
,
8026 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
8027 vec_offset
= vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
8030 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8033 vec_mask
= vect_get_vec_def_for_stmt_copy (mask_dt
, vec_mask
);
8036 if (grouped_load
|| slp_perm
)
8037 dr_chain
.create (vec_num
);
8039 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
8043 vec_array
= create_vector_array (vectype
, vec_num
);
8045 tree final_mask
= NULL_TREE
;
8047 final_mask
= vect_get_loop_mask (gsi
, loop_masks
, ncopies
,
8050 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
8057 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8059 unsigned int align
= TYPE_ALIGN_UNIT (TREE_TYPE (vectype
));
8060 tree alias_ptr
= build_int_cst (ref_type
, align
);
8061 call
= gimple_build_call_internal (IFN_MASK_LOAD_LANES
, 3,
8062 dataref_ptr
, alias_ptr
,
8068 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8069 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
8070 call
= gimple_build_call_internal (IFN_LOAD_LANES
, 1, data_ref
);
8072 gimple_call_set_lhs (call
, vec_array
);
8073 gimple_call_set_nothrow (call
, true);
8075 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8077 /* Extract each vector into an SSA_NAME. */
8078 for (i
= 0; i
< vec_num
; i
++)
8080 new_temp
= read_vector_array (stmt
, gsi
, scalar_dest
,
8082 dr_chain
.quick_push (new_temp
);
8085 /* Record the mapping between SSA_NAMEs and statements. */
8086 vect_record_grouped_load_vectors (stmt
, dr_chain
);
8090 for (i
= 0; i
< vec_num
; i
++)
8092 tree final_mask
= NULL_TREE
;
8094 && memory_access_type
!= VMAT_INVARIANT
)
8095 final_mask
= vect_get_loop_mask (gsi
, loop_masks
,
8097 vectype
, vec_num
* j
+ i
);
8099 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
8103 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8106 /* 2. Create the vector-load in the loop. */
8107 switch (alignment_support_scheme
)
8110 case dr_unaligned_supported
:
8112 unsigned int align
, misalign
;
8114 if (memory_access_type
== VMAT_GATHER_SCATTER
)
8116 tree scale
= size_int (gs_info
.scale
);
8119 call
= gimple_build_call_internal
8120 (IFN_MASK_GATHER_LOAD
, 4, dataref_ptr
,
8121 vec_offset
, scale
, final_mask
);
8123 call
= gimple_build_call_internal
8124 (IFN_GATHER_LOAD
, 3, dataref_ptr
,
8126 gimple_call_set_nothrow (call
, true);
8128 data_ref
= NULL_TREE
;
8132 align
= DR_TARGET_ALIGNMENT (dr
);
8133 if (alignment_support_scheme
== dr_aligned
)
8135 gcc_assert (aligned_access_p (first_dr
));
8138 else if (DR_MISALIGNMENT (first_dr
) == -1)
8140 align
= dr_alignment (vect_dr_behavior (first_dr
));
8144 misalign
= DR_MISALIGNMENT (first_dr
);
8145 if (dataref_offset
== NULL_TREE
8146 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
8147 set_ptr_info_alignment (get_ptr_info (dataref_ptr
),
8152 align
= least_bit_hwi (misalign
| align
);
8153 tree ptr
= build_int_cst (ref_type
, align
);
8155 = gimple_build_call_internal (IFN_MASK_LOAD
, 3,
8158 gimple_call_set_nothrow (call
, true);
8160 data_ref
= NULL_TREE
;
8165 = fold_build2 (MEM_REF
, vectype
, dataref_ptr
,
8168 : build_int_cst (ref_type
, 0));
8169 if (alignment_support_scheme
== dr_aligned
)
8171 else if (DR_MISALIGNMENT (first_dr
) == -1)
8172 TREE_TYPE (data_ref
)
8173 = build_aligned_type (TREE_TYPE (data_ref
),
8174 align
* BITS_PER_UNIT
);
8176 TREE_TYPE (data_ref
)
8177 = build_aligned_type (TREE_TYPE (data_ref
),
8178 TYPE_ALIGN (elem_type
));
8182 case dr_explicit_realign
:
8186 tree vs
= size_int (TYPE_VECTOR_SUBPARTS (vectype
));
8188 if (compute_in_loop
)
8189 msq
= vect_setup_realignment (first_stmt
, gsi
,
8191 dr_explicit_realign
,
8194 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
8195 ptr
= copy_ssa_name (dataref_ptr
);
8197 ptr
= make_ssa_name (TREE_TYPE (dataref_ptr
));
8198 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
8199 new_stmt
= gimple_build_assign
8200 (ptr
, BIT_AND_EXPR
, dataref_ptr
,
8202 (TREE_TYPE (dataref_ptr
),
8203 -(HOST_WIDE_INT
) align
));
8204 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8206 = build2 (MEM_REF
, vectype
, ptr
,
8207 build_int_cst (ref_type
, 0));
8208 vec_dest
= vect_create_destination_var (scalar_dest
,
8210 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
8211 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
8212 gimple_assign_set_lhs (new_stmt
, new_temp
);
8213 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
8214 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
8215 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8218 bump
= size_binop (MULT_EXPR
, vs
,
8219 TYPE_SIZE_UNIT (elem_type
));
8220 bump
= size_binop (MINUS_EXPR
, bump
, size_one_node
);
8221 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
, stmt
, bump
);
8222 new_stmt
= gimple_build_assign
8223 (NULL_TREE
, BIT_AND_EXPR
, ptr
,
8225 (TREE_TYPE (ptr
), -(HOST_WIDE_INT
) align
));
8226 ptr
= copy_ssa_name (ptr
, new_stmt
);
8227 gimple_assign_set_lhs (new_stmt
, ptr
);
8228 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8230 = build2 (MEM_REF
, vectype
, ptr
,
8231 build_int_cst (ref_type
, 0));
8234 case dr_explicit_realign_optimized
:
8236 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
8237 new_temp
= copy_ssa_name (dataref_ptr
);
8239 new_temp
= make_ssa_name (TREE_TYPE (dataref_ptr
));
8240 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
8241 new_stmt
= gimple_build_assign
8242 (new_temp
, BIT_AND_EXPR
, dataref_ptr
,
8243 build_int_cst (TREE_TYPE (dataref_ptr
),
8244 -(HOST_WIDE_INT
) align
));
8245 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8247 = build2 (MEM_REF
, vectype
, new_temp
,
8248 build_int_cst (ref_type
, 0));
8254 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8255 /* DATA_REF is null if we've already built the statement. */
8257 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
8258 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
8259 gimple_set_lhs (new_stmt
, new_temp
);
8260 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8262 /* 3. Handle explicit realignment if necessary/supported.
8264 vec_dest = realign_load (msq, lsq, realignment_token) */
8265 if (alignment_support_scheme
== dr_explicit_realign_optimized
8266 || alignment_support_scheme
== dr_explicit_realign
)
8268 lsq
= gimple_assign_lhs (new_stmt
);
8269 if (!realignment_token
)
8270 realignment_token
= dataref_ptr
;
8271 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8272 new_stmt
= gimple_build_assign (vec_dest
, REALIGN_LOAD_EXPR
,
8273 msq
, lsq
, realignment_token
);
8274 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
8275 gimple_assign_set_lhs (new_stmt
, new_temp
);
8276 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8278 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
8281 if (i
== vec_num
- 1 && j
== ncopies
- 1)
8282 add_phi_arg (phi
, lsq
,
8283 loop_latch_edge (containing_loop
),
8289 /* 4. Handle invariant-load. */
8290 if (inv_p
&& !bb_vinfo
)
8292 gcc_assert (!grouped_load
);
	      /* If we have versioned for aliasing or the loop doesn't
		 have any data dependencies that would preclude this,
		 then we are sure this is a loop invariant load and
		 thus we can insert it on the preheader edge.  */
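	      /* A sketch of the transformation (illustrative only): for an
		 invariant load such as

		     loop:  x_1 = *p_2;    -- p_2 invariant in the loop
			    ... = x_1 ...;

		 the scalar load is inserted on the preheader edge and the
		 loaded value is splatted into a vector there, e.g.
		 tem_3 = *p_2;  vec_4 = {tem_3, tem_3, ...};  */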
8297 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo
)
8298 && !nested_in_vect_loop
8299 && hoist_defs_of_uses (stmt
, loop
))
8301 if (dump_enabled_p ())
8303 dump_printf_loc (MSG_NOTE
, vect_location
,
8304 "hoisting out of the vectorized "
8306 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
8308 tree tem
= copy_ssa_name (scalar_dest
);
8309 gsi_insert_on_edge_immediate
8310 (loop_preheader_edge (loop
),
8311 gimple_build_assign (tem
,
8313 (gimple_assign_rhs1 (stmt
))));
8314 new_temp
= vect_init_vector (stmt
, tem
, vectype
, NULL
);
8315 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
8316 set_vinfo_for_stmt (new_stmt
,
8317 new_stmt_vec_info (new_stmt
, vinfo
));
8321 gimple_stmt_iterator gsi2
= *gsi
;
8323 new_temp
= vect_init_vector (stmt
, scalar_dest
,
8325 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
8329 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
8331 tree perm_mask
= perm_mask_for_reverse (vectype
);
8332 new_temp
= permute_vec_elements (new_temp
, new_temp
,
8333 perm_mask
, stmt
, gsi
);
8334 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
8337 /* Collect vector loads and later create their permutation in
8338 vect_transform_grouped_load (). */
8339 if (grouped_load
|| slp_perm
)
8340 dr_chain
.quick_push (new_temp
);
8342 /* Store vector loads in the corresponding SLP_NODE. */
8343 if (slp
&& !slp_perm
)
8344 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
	  /* With an SLP permutation we load the gaps as well; without one
	     we need to skip the gaps once we have managed to load all of
	     the group's elements.  group_gap_adj is GROUP_SIZE here.  */
8349 group_elt
+= nunits
;
8350 if (maybe_ne (group_gap_adj
, 0U)
8352 && known_eq (group_elt
, group_size
- group_gap_adj
))
8354 poly_wide_int bump_val
8355 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
8357 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
8358 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8363 /* Bump the vector pointer to account for a gap or for excess
8364 elements loaded for a permuted SLP load. */
8365 if (maybe_ne (group_gap_adj
, 0U) && slp_perm
)
8367 poly_wide_int bump_val
8368 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
8370 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
8371 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8376 if (slp
&& !slp_perm
)
8382 if (!vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
8383 slp_node_instance
, false,
8386 dr_chain
.release ();
8394 if (memory_access_type
!= VMAT_LOAD_STORE_LANES
)
8395 vect_transform_grouped_load (stmt
, dr_chain
, group_size
, gsi
);
8396 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
8401 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
8403 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
8404 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
8407 dr_chain
.release ();
/* Function vect_is_simple_cond.

   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */
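/* For example (illustrative only): for the condition of

       x_5 = a_1 < b_2 ? c_3 : d_4;

   the two comparison operands a_1 and b_2 are checked with
   vect_is_simple_use; if both are supportable (say, loop-invariant SSA
   names or constants), *COMP_VECTYPE is the vector type in which the
   comparison a_1 < b_2 will be carried out and *DTS records the def
   types of the two operands.  */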
8427 vect_is_simple_cond (tree cond
, vec_info
*vinfo
,
8428 tree
*comp_vectype
, enum vect_def_type
*dts
,
8432 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8435 if (TREE_CODE (cond
) == SSA_NAME
8436 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond
)))
8438 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (cond
);
8439 if (!vect_is_simple_use (cond
, vinfo
, &lhs_def_stmt
,
8440 &dts
[0], comp_vectype
)
8442 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype
))
8447 if (!COMPARISON_CLASS_P (cond
))
8450 lhs
= TREE_OPERAND (cond
, 0);
8451 rhs
= TREE_OPERAND (cond
, 1);
8453 if (TREE_CODE (lhs
) == SSA_NAME
)
8455 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (lhs
);
8456 if (!vect_is_simple_use (lhs
, vinfo
, &lhs_def_stmt
, &dts
[0], &vectype1
))
8459 else if (TREE_CODE (lhs
) == INTEGER_CST
|| TREE_CODE (lhs
) == REAL_CST
8460 || TREE_CODE (lhs
) == FIXED_CST
)
8461 dts
[0] = vect_constant_def
;
8465 if (TREE_CODE (rhs
) == SSA_NAME
)
8467 gimple
*rhs_def_stmt
= SSA_NAME_DEF_STMT (rhs
);
8468 if (!vect_is_simple_use (rhs
, vinfo
, &rhs_def_stmt
, &dts
[1], &vectype2
))
8471 else if (TREE_CODE (rhs
) == INTEGER_CST
|| TREE_CODE (rhs
) == REAL_CST
8472 || TREE_CODE (rhs
) == FIXED_CST
)
8473 dts
[1] = vect_constant_def
;
8477 if (vectype1
&& vectype2
8478 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1
),
8479 TYPE_VECTOR_SUBPARTS (vectype2
)))
8482 *comp_vectype
= vectype1
? vectype1
: vectype2
;
8483 /* Invariant comparison. */
8484 if (! *comp_vectype
)
8486 tree scalar_type
= TREE_TYPE (lhs
);
8487 /* If we can widen the comparison to match vectype do so. */
8488 if (INTEGRAL_TYPE_P (scalar_type
)
8489 && tree_int_cst_lt (TYPE_SIZE (scalar_type
),
8490 TYPE_SIZE (TREE_TYPE (vectype
))))
8491 scalar_type
= build_nonstandard_integer_type
8492 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype
))),
8493 TYPE_UNSIGNED (scalar_type
));
8494 *comp_vectype
= get_vectype_for_scalar_type (scalar_type
);
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
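/* As an illustration (not taken from the original sources): a scalar
   statement

       x_5 = a_1 < b_2 ? c_3 : d_4;

   is replaced by a vector comparison feeding a VEC_COND_EXPR, e.g.

       mask_6 = va_1 < vb_2;
       vx_7 = VEC_COND_EXPR <mask_6, vc_3, vd_4>;

   with one such pair generated per copy when ncopies > 1.  */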
8514 vectorizable_condition (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
8515 gimple
**vec_stmt
, tree reduc_def
, int reduc_index
,
8518 tree scalar_dest
= NULL_TREE
;
8519 tree vec_dest
= NULL_TREE
;
8520 tree cond_expr
, cond_expr0
= NULL_TREE
, cond_expr1
= NULL_TREE
;
8521 tree then_clause
, else_clause
;
8522 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
8523 tree comp_vectype
= NULL_TREE
;
8524 tree vec_cond_lhs
= NULL_TREE
, vec_cond_rhs
= NULL_TREE
;
8525 tree vec_then_clause
= NULL_TREE
, vec_else_clause
= NULL_TREE
;
8528 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
8529 enum vect_def_type dts
[4]
8530 = {vect_unknown_def_type
, vect_unknown_def_type
,
8531 vect_unknown_def_type
, vect_unknown_def_type
};
8534 enum tree_code code
, cond_code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
8535 stmt_vec_info prev_stmt_info
= NULL
;
8537 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
8538 vec
<tree
> vec_oprnds0
= vNULL
;
8539 vec
<tree
> vec_oprnds1
= vNULL
;
8540 vec
<tree
> vec_oprnds2
= vNULL
;
8541 vec
<tree
> vec_oprnds3
= vNULL
;
8543 bool masked
= false;
8545 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
8548 vect_reduction_type reduction_type
8549 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
);
8550 if (reduction_type
== TREE_CODE_REDUCTION
)
8552 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
8555 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
8556 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
8560 /* FORNOW: not yet supported. */
8561 if (STMT_VINFO_LIVE_P (stmt_info
))
8563 if (dump_enabled_p ())
8564 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8565 "value used after loop.\n");
8570 /* Is vectorizable conditional operation? */
8571 if (!is_gimple_assign (stmt
))
8574 code
= gimple_assign_rhs_code (stmt
);
8576 if (code
!= COND_EXPR
)
8579 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
8580 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8585 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
8587 gcc_assert (ncopies
>= 1);
8588 if (reduc_index
&& ncopies
> 1)
8589 return false; /* FORNOW */
8591 cond_expr
= gimple_assign_rhs1 (stmt
);
8592 then_clause
= gimple_assign_rhs2 (stmt
);
8593 else_clause
= gimple_assign_rhs3 (stmt
);
8595 if (!vect_is_simple_cond (cond_expr
, stmt_info
->vinfo
,
8596 &comp_vectype
, &dts
[0], vectype
)
8601 if (!vect_is_simple_use (then_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[2],
8604 if (!vect_is_simple_use (else_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[3],
8608 if (vectype1
&& !useless_type_conversion_p (vectype
, vectype1
))
8611 if (vectype2
&& !useless_type_conversion_p (vectype
, vectype2
))
8614 masked
= !COMPARISON_CLASS_P (cond_expr
);
8615 vec_cmp_type
= build_same_sized_truth_vector_type (comp_vectype
);
8617 if (vec_cmp_type
== NULL_TREE
)
8620 cond_code
= TREE_CODE (cond_expr
);
8623 cond_expr0
= TREE_OPERAND (cond_expr
, 0);
8624 cond_expr1
= TREE_OPERAND (cond_expr
, 1);
8627 if (!masked
&& VECTOR_BOOLEAN_TYPE_P (comp_vectype
))
      /* Boolean values may have another representation in vectors
	 and therefore we prefer bit operations over comparison for
	 them (which also works for scalar masks).  We store opcodes
	 to use in bitop1 and bitop2.  Statement is vectorized as
	 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
	 depending on bitop1 and bitop2 arity.  */
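      /* For instance (illustrative only): when the operands are mask
	 vectors, "m1 > m2" is emitted as "m1 & ~m2", i.e. bitop1 is
	 BIT_NOT_EXPR (applied to m2) and bitop2 is BIT_AND_EXPR; for
	 "m1 != m2" a single BIT_XOR_EXPR suffices and bitop2 stays
	 NOP_EXPR.  */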
8638 bitop1
= BIT_NOT_EXPR
;
8639 bitop2
= BIT_AND_EXPR
;
8642 bitop1
= BIT_NOT_EXPR
;
8643 bitop2
= BIT_IOR_EXPR
;
8646 bitop1
= BIT_NOT_EXPR
;
8647 bitop2
= BIT_AND_EXPR
;
8648 std::swap (cond_expr0
, cond_expr1
);
8651 bitop1
= BIT_NOT_EXPR
;
8652 bitop2
= BIT_IOR_EXPR
;
8653 std::swap (cond_expr0
, cond_expr1
);
8656 bitop1
= BIT_XOR_EXPR
;
8659 bitop1
= BIT_XOR_EXPR
;
8660 bitop2
= BIT_NOT_EXPR
;
8665 cond_code
= SSA_NAME
;
8670 STMT_VINFO_TYPE (stmt_info
) = condition_vec_info_type
;
8671 if (bitop1
!= NOP_EXPR
)
8673 machine_mode mode
= TYPE_MODE (comp_vectype
);
8676 optab
= optab_for_tree_code (bitop1
, comp_vectype
, optab_default
);
8677 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8680 if (bitop2
!= NOP_EXPR
)
8682 optab
= optab_for_tree_code (bitop2
, comp_vectype
,
8684 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8688 if (expand_vec_cond_expr_p (vectype
, comp_vectype
,
8692 vect_model_simple_cost (stmt_info
, ncopies
, dts
, ndts
, NULL
, NULL
);
8702 vec_oprnds0
.create (1);
8703 vec_oprnds1
.create (1);
8704 vec_oprnds2
.create (1);
8705 vec_oprnds3
.create (1);
8709 scalar_dest
= gimple_assign_lhs (stmt
);
8710 if (reduction_type
!= EXTRACT_LAST_REDUCTION
)
8711 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8713 /* Handle cond expr. */
8714 for (j
= 0; j
< ncopies
; j
++)
8716 gimple
*new_stmt
= NULL
;
8721 auto_vec
<tree
, 4> ops
;
8722 auto_vec
<vec
<tree
>, 4> vec_defs
;
8725 ops
.safe_push (cond_expr
);
8728 ops
.safe_push (cond_expr0
);
8729 ops
.safe_push (cond_expr1
);
8731 ops
.safe_push (then_clause
);
8732 ops
.safe_push (else_clause
);
8733 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
8734 vec_oprnds3
= vec_defs
.pop ();
8735 vec_oprnds2
= vec_defs
.pop ();
8737 vec_oprnds1
= vec_defs
.pop ();
8738 vec_oprnds0
= vec_defs
.pop ();
8746 = vect_get_vec_def_for_operand (cond_expr
, stmt
,
8748 vect_is_simple_use (cond_expr
, stmt_info
->vinfo
,
8754 = vect_get_vec_def_for_operand (cond_expr0
,
8755 stmt
, comp_vectype
);
8756 vect_is_simple_use (cond_expr0
, loop_vinfo
, >emp
, &dts
[0]);
8759 = vect_get_vec_def_for_operand (cond_expr1
,
8760 stmt
, comp_vectype
);
8761 vect_is_simple_use (cond_expr1
, loop_vinfo
, >emp
, &dts
[1]);
8763 if (reduc_index
== 1)
8764 vec_then_clause
= reduc_def
;
8767 vec_then_clause
= vect_get_vec_def_for_operand (then_clause
,
8769 vect_is_simple_use (then_clause
, loop_vinfo
,
8772 if (reduc_index
== 2)
8773 vec_else_clause
= reduc_def
;
8776 vec_else_clause
= vect_get_vec_def_for_operand (else_clause
,
8778 vect_is_simple_use (else_clause
, loop_vinfo
, >emp
, &dts
[3]);
8785 = vect_get_vec_def_for_stmt_copy (dts
[0],
8786 vec_oprnds0
.pop ());
8789 = vect_get_vec_def_for_stmt_copy (dts
[1],
8790 vec_oprnds1
.pop ());
8792 vec_then_clause
= vect_get_vec_def_for_stmt_copy (dts
[2],
8793 vec_oprnds2
.pop ());
8794 vec_else_clause
= vect_get_vec_def_for_stmt_copy (dts
[3],
8795 vec_oprnds3
.pop ());
8800 vec_oprnds0
.quick_push (vec_cond_lhs
);
8802 vec_oprnds1
.quick_push (vec_cond_rhs
);
8803 vec_oprnds2
.quick_push (vec_then_clause
);
8804 vec_oprnds3
.quick_push (vec_else_clause
);
8807 /* Arguments are ready. Create the new vector stmt. */
8808 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_cond_lhs
)
8810 vec_then_clause
= vec_oprnds2
[i
];
8811 vec_else_clause
= vec_oprnds3
[i
];
8814 vec_compare
= vec_cond_lhs
;
8817 vec_cond_rhs
= vec_oprnds1
[i
];
8818 if (bitop1
== NOP_EXPR
)
8819 vec_compare
= build2 (cond_code
, vec_cmp_type
,
8820 vec_cond_lhs
, vec_cond_rhs
);
8823 new_temp
= make_ssa_name (vec_cmp_type
);
8824 if (bitop1
== BIT_NOT_EXPR
)
8825 new_stmt
= gimple_build_assign (new_temp
, bitop1
,
8829 = gimple_build_assign (new_temp
, bitop1
, vec_cond_lhs
,
8831 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8832 if (bitop2
== NOP_EXPR
)
8833 vec_compare
= new_temp
;
8834 else if (bitop2
== BIT_NOT_EXPR
)
8836 /* Instead of doing ~x ? y : z do x ? z : y. */
8837 vec_compare
= new_temp
;
8838 std::swap (vec_then_clause
, vec_else_clause
);
8842 vec_compare
= make_ssa_name (vec_cmp_type
);
8844 = gimple_build_assign (vec_compare
, bitop2
,
8845 vec_cond_lhs
, new_temp
);
8846 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8850 if (reduction_type
== EXTRACT_LAST_REDUCTION
)
8852 if (!is_gimple_val (vec_compare
))
8854 tree vec_compare_name
= make_ssa_name (vec_cmp_type
);
8855 new_stmt
= gimple_build_assign (vec_compare_name
,
8857 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8858 vec_compare
= vec_compare_name
;
8860 gcc_assert (reduc_index
== 2);
8861 new_stmt
= gimple_build_call_internal
8862 (IFN_FOLD_EXTRACT_LAST
, 3, else_clause
, vec_compare
,
8864 gimple_call_set_lhs (new_stmt
, scalar_dest
);
8865 SSA_NAME_DEF_STMT (scalar_dest
) = new_stmt
;
8866 if (stmt
== gsi_stmt (*gsi
))
8867 vect_finish_replace_stmt (stmt
, new_stmt
);
8870 /* In this case we're moving the definition to later in the
8871 block. That doesn't matter because the only uses of the
8872 lhs are in phi statements. */
8873 gimple_stmt_iterator old_gsi
= gsi_for_stmt (stmt
);
8874 gsi_remove (&old_gsi
, true);
8875 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8880 new_temp
= make_ssa_name (vec_dest
);
8881 new_stmt
= gimple_build_assign (new_temp
, VEC_COND_EXPR
,
8882 vec_compare
, vec_then_clause
,
8884 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8887 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
8894 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
8896 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
8898 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
8901 vec_oprnds0
.release ();
8902 vec_oprnds1
.release ();
8903 vec_oprnds2
.release ();
8904 vec_oprnds3
.release ();
/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
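/* As an illustration (not taken from the original sources): the scalar
   statement

       b_3 = x_1 < y_2;

   where b_3 has a boolean type is vectorized into a mask-producing
   comparison such as

       mask_4 = vx_1 < vy_2;

   whose result type is the vector mask type corresponding to the
   operands' vector type.  */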
8918 vectorizable_comparison (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
8919 gimple
**vec_stmt
, tree reduc_def
,
8922 tree lhs
, rhs1
, rhs2
;
8923 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
8924 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8925 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
8926 tree vec_rhs1
= NULL_TREE
, vec_rhs2
= NULL_TREE
;
8928 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
8929 enum vect_def_type dts
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
8933 enum tree_code code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
8934 stmt_vec_info prev_stmt_info
= NULL
;
8936 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
8937 vec
<tree
> vec_oprnds0
= vNULL
;
8938 vec
<tree
> vec_oprnds1
= vNULL
;
8943 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
8946 if (!vectype
|| !VECTOR_BOOLEAN_TYPE_P (vectype
))
8949 mask_type
= vectype
;
8950 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
8955 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
8957 gcc_assert (ncopies
>= 1);
8958 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
8959 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
8963 if (STMT_VINFO_LIVE_P (stmt_info
))
8965 if (dump_enabled_p ())
8966 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8967 "value used after loop.\n");
8971 if (!is_gimple_assign (stmt
))
8974 code
= gimple_assign_rhs_code (stmt
);
8976 if (TREE_CODE_CLASS (code
) != tcc_comparison
)
8979 rhs1
= gimple_assign_rhs1 (stmt
);
8980 rhs2
= gimple_assign_rhs2 (stmt
);
8982 if (!vect_is_simple_use (rhs1
, stmt_info
->vinfo
, &def_stmt
,
8983 &dts
[0], &vectype1
))
8986 if (!vect_is_simple_use (rhs2
, stmt_info
->vinfo
, &def_stmt
,
8987 &dts
[1], &vectype2
))
8990 if (vectype1
&& vectype2
8991 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1
),
8992 TYPE_VECTOR_SUBPARTS (vectype2
)))
8995 vectype
= vectype1
? vectype1
: vectype2
;
8997 /* Invariant comparison. */
9000 vectype
= get_vectype_for_scalar_type (TREE_TYPE (rhs1
));
9001 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype
), nunits
))
9004 else if (maybe_ne (nunits
, TYPE_VECTOR_SUBPARTS (vectype
)))
9007 /* Can't compare mask and non-mask types. */
9008 if (vectype1
&& vectype2
9009 && (VECTOR_BOOLEAN_TYPE_P (vectype1
) ^ VECTOR_BOOLEAN_TYPE_P (vectype2
)))
  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
     BITOP2 (rhs1 BITOP1 rhs2) or
     rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
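  /* For instance (illustrative only): with two mask operands, "m1 >= m2"
     is emitted as "m1 | ~m2" (bitop1 = BIT_NOT_EXPR on m2, bitop2 =
     BIT_IOR_EXPR), and "m1 == m2" as "~(m1 ^ m2)" (bitop1 = BIT_XOR_EXPR,
     bitop2 = BIT_NOT_EXPR).  */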
9019 if (VECTOR_BOOLEAN_TYPE_P (vectype
))
9021 if (code
== GT_EXPR
)
9023 bitop1
= BIT_NOT_EXPR
;
9024 bitop2
= BIT_AND_EXPR
;
9026 else if (code
== GE_EXPR
)
9028 bitop1
= BIT_NOT_EXPR
;
9029 bitop2
= BIT_IOR_EXPR
;
9031 else if (code
== LT_EXPR
)
9033 bitop1
= BIT_NOT_EXPR
;
9034 bitop2
= BIT_AND_EXPR
;
9035 std::swap (rhs1
, rhs2
);
9036 std::swap (dts
[0], dts
[1]);
9038 else if (code
== LE_EXPR
)
9040 bitop1
= BIT_NOT_EXPR
;
9041 bitop2
= BIT_IOR_EXPR
;
9042 std::swap (rhs1
, rhs2
);
9043 std::swap (dts
[0], dts
[1]);
9047 bitop1
= BIT_XOR_EXPR
;
9048 if (code
== EQ_EXPR
)
9049 bitop2
= BIT_NOT_EXPR
;
9055 STMT_VINFO_TYPE (stmt_info
) = comparison_vec_info_type
;
9057 vect_model_simple_cost (stmt_info
, ncopies
* (1 + (bitop2
!= NOP_EXPR
)),
9058 dts
, ndts
, NULL
, NULL
);
9059 if (bitop1
== NOP_EXPR
)
9060 return expand_vec_cmp_expr_p (vectype
, mask_type
, code
);
9063 machine_mode mode
= TYPE_MODE (vectype
);
9066 optab
= optab_for_tree_code (bitop1
, vectype
, optab_default
);
9067 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
9070 if (bitop2
!= NOP_EXPR
)
9072 optab
= optab_for_tree_code (bitop2
, vectype
, optab_default
);
9073 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
9083 vec_oprnds0
.create (1);
9084 vec_oprnds1
.create (1);
9088 lhs
= gimple_assign_lhs (stmt
);
9089 mask
= vect_create_destination_var (lhs
, mask_type
);
9091 /* Handle cmp expr. */
9092 for (j
= 0; j
< ncopies
; j
++)
9094 gassign
*new_stmt
= NULL
;
9099 auto_vec
<tree
, 2> ops
;
9100 auto_vec
<vec
<tree
>, 2> vec_defs
;
9102 ops
.safe_push (rhs1
);
9103 ops
.safe_push (rhs2
);
9104 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
9105 vec_oprnds1
= vec_defs
.pop ();
9106 vec_oprnds0
= vec_defs
.pop ();
9110 vec_rhs1
= vect_get_vec_def_for_operand (rhs1
, stmt
, vectype
);
9111 vec_rhs2
= vect_get_vec_def_for_operand (rhs2
, stmt
, vectype
);
9116 vec_rhs1
= vect_get_vec_def_for_stmt_copy (dts
[0],
9117 vec_oprnds0
.pop ());
9118 vec_rhs2
= vect_get_vec_def_for_stmt_copy (dts
[1],
9119 vec_oprnds1
.pop ());
9124 vec_oprnds0
.quick_push (vec_rhs1
);
9125 vec_oprnds1
.quick_push (vec_rhs2
);
9128 /* Arguments are ready. Create the new vector stmt. */
9129 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_rhs1
)
9131 vec_rhs2
= vec_oprnds1
[i
];
9133 new_temp
= make_ssa_name (mask
);
9134 if (bitop1
== NOP_EXPR
)
9136 new_stmt
= gimple_build_assign (new_temp
, code
,
9137 vec_rhs1
, vec_rhs2
);
9138 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
9142 if (bitop1
== BIT_NOT_EXPR
)
9143 new_stmt
= gimple_build_assign (new_temp
, bitop1
, vec_rhs2
);
9145 new_stmt
= gimple_build_assign (new_temp
, bitop1
, vec_rhs1
,
9147 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
9148 if (bitop2
!= NOP_EXPR
)
9150 tree res
= make_ssa_name (mask
);
9151 if (bitop2
== BIT_NOT_EXPR
)
9152 new_stmt
= gimple_build_assign (res
, bitop2
, new_temp
);
9154 new_stmt
= gimple_build_assign (res
, bitop2
, vec_rhs1
,
9156 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
9160 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
9167 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
9169 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
9171 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
9174 vec_oprnds0
.release ();
9175 vec_oprnds1
.release ();
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */

static bool
can_vectorize_live_stmts (gimple *stmt, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, gimple **vec_stmt)
{
  if (slp_node)
    {
      gimple *slp_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt)
	{
	  stmt_vec_info slp_stmt_info = vinfo_for_stmt (slp_stmt);
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt, gsi, slp_node, i,
					       vec_stmt))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt))
	   && !vectorizable_live_operation (stmt, gsi, slp_node, -1, vec_stmt))
    return false;

  return true;
}
9209 /* Make sure the statement is vectorizable. */
9212 vect_analyze_stmt (gimple
*stmt
, bool *need_to_vectorize
, slp_tree node
,
9213 slp_instance node_instance
)
9215 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
9216 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
9217 enum vect_relevant relevance
= STMT_VINFO_RELEVANT (stmt_info
);
9219 gimple
*pattern_stmt
;
9220 gimple_seq pattern_def_seq
;
9222 if (dump_enabled_p ())
9224 dump_printf_loc (MSG_NOTE
, vect_location
, "==> examining statement: ");
9225 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
9228 if (gimple_has_volatile_ops (stmt
))
9230 if (dump_enabled_p ())
9231 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9232 "not vectorized: stmt has volatile operands\n");
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     A pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal and don't analyze pattern stmts separately; the pattern stmts
     will already be part of the SLP instance.  */
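  /* For example (illustrative only): for a widening multiplication written as

	 S1: a_t = (TYPE1) x_t;
	 S2: b_t = (TYPE1) y_t;
	 S3: prod = a_t * b_t;

     pattern recognition may attach a pattern statement of the form
     "prod' = WIDEN_MULT_EXPR <x_t, y_t>" to S3; if S3 itself is not
     relevant, it is that pattern statement which gets analyzed here.  */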
9251 pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
9252 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
9253 && !STMT_VINFO_LIVE_P (stmt_info
))
9255 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
9257 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
9258 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
9260 /* Analyze PATTERN_STMT instead of the original stmt. */
9261 stmt
= pattern_stmt
;
9262 stmt_info
= vinfo_for_stmt (pattern_stmt
);
9263 if (dump_enabled_p ())
9265 dump_printf_loc (MSG_NOTE
, vect_location
,
9266 "==> examining pattern statement: ");
9267 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
9272 if (dump_enabled_p ())
9273 dump_printf_loc (MSG_NOTE
, vect_location
, "irrelevant.\n");
9278 else if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
9281 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
9282 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
9284 /* Analyze PATTERN_STMT too. */
9285 if (dump_enabled_p ())
9287 dump_printf_loc (MSG_NOTE
, vect_location
,
9288 "==> examining pattern statement: ");
9289 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
9292 if (!vect_analyze_stmt (pattern_stmt
, need_to_vectorize
, node
,
9297 if (is_pattern_stmt_p (stmt_info
)
9299 && (pattern_def_seq
= STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
)))
9301 gimple_stmt_iterator si
;
9303 for (si
= gsi_start (pattern_def_seq
); !gsi_end_p (si
); gsi_next (&si
))
9305 gimple
*pattern_def_stmt
= gsi_stmt (si
);
9306 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt
))
9307 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt
)))
9309 /* Analyze def stmt of STMT if it's a pattern stmt. */
9310 if (dump_enabled_p ())
9312 dump_printf_loc (MSG_NOTE
, vect_location
,
9313 "==> examining pattern def statement: ");
9314 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, pattern_def_stmt
, 0);
9317 if (!vect_analyze_stmt (pattern_def_stmt
,
9318 need_to_vectorize
, node
, node_instance
))
9324 switch (STMT_VINFO_DEF_TYPE (stmt_info
))
9326 case vect_internal_def
:
9329 case vect_reduction_def
:
9330 case vect_nested_cycle
:
9331 gcc_assert (!bb_vinfo
9332 && (relevance
== vect_used_in_outer
9333 || relevance
== vect_used_in_outer_by_reduction
9334 || relevance
== vect_used_by_reduction
9335 || relevance
== vect_unused_in_scope
9336 || relevance
== vect_used_only_live
));
9339 case vect_induction_def
:
9340 gcc_assert (!bb_vinfo
);
9343 case vect_constant_def
:
9344 case vect_external_def
:
9345 case vect_unknown_def_type
:
9350 if (STMT_VINFO_RELEVANT_P (stmt_info
))
9352 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt
))));
9353 gcc_assert (STMT_VINFO_VECTYPE (stmt_info
)
9354 || (is_gimple_call (stmt
)
9355 && gimple_call_lhs (stmt
) == NULL_TREE
));
9356 *need_to_vectorize
= true;
9359 if (PURE_SLP_STMT (stmt_info
) && !node
)
9361 dump_printf_loc (MSG_NOTE
, vect_location
,
9362 "handled only by SLP analysis\n");
9368 && (STMT_VINFO_RELEVANT_P (stmt_info
)
9369 || STMT_VINFO_DEF_TYPE (stmt_info
) == vect_reduction_def
))
9370 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, node
)
9371 || vectorizable_conversion (stmt
, NULL
, NULL
, node
)
9372 || vectorizable_shift (stmt
, NULL
, NULL
, node
)
9373 || vectorizable_operation (stmt
, NULL
, NULL
, node
)
9374 || vectorizable_assignment (stmt
, NULL
, NULL
, node
)
9375 || vectorizable_load (stmt
, NULL
, NULL
, node
, NULL
)
9376 || vectorizable_call (stmt
, NULL
, NULL
, node
)
9377 || vectorizable_store (stmt
, NULL
, NULL
, node
)
9378 || vectorizable_reduction (stmt
, NULL
, NULL
, node
, node_instance
)
9379 || vectorizable_induction (stmt
, NULL
, NULL
, node
)
9380 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, node
)
9381 || vectorizable_comparison (stmt
, NULL
, NULL
, NULL
, node
));
9385 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, node
)
9386 || vectorizable_conversion (stmt
, NULL
, NULL
, node
)
9387 || vectorizable_shift (stmt
, NULL
, NULL
, node
)
9388 || vectorizable_operation (stmt
, NULL
, NULL
, node
)
9389 || vectorizable_assignment (stmt
, NULL
, NULL
, node
)
9390 || vectorizable_load (stmt
, NULL
, NULL
, node
, NULL
)
9391 || vectorizable_call (stmt
, NULL
, NULL
, node
)
9392 || vectorizable_store (stmt
, NULL
, NULL
, node
)
9393 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, node
)
9394 || vectorizable_comparison (stmt
, NULL
, NULL
, NULL
, node
));
9399 if (dump_enabled_p ())
9401 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9402 "not vectorized: relevant stmt not ");
9403 dump_printf (MSG_MISSED_OPTIMIZATION
, "supported: ");
9404 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
  /* Stmts that are (also) "live" (i.e. that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
9415 if (STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
9416 && !can_vectorize_live_stmts (stmt
, NULL
, node
, NULL
))
9418 if (dump_enabled_p ())
9420 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9421 "not vectorized: live stmt not supported: ");
9422 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */
9437 vect_transform_stmt (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
9438 bool *grouped_store
, slp_tree slp_node
,
9439 slp_instance slp_node_instance
)
9441 bool is_store
= false;
9442 gimple
*vec_stmt
= NULL
;
9443 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
9446 gcc_assert (slp_node
|| !PURE_SLP_STMT (stmt_info
));
9447 gimple
*old_vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
9449 bool nested_p
= (STMT_VINFO_LOOP_VINFO (stmt_info
)
9450 && nested_in_vect_loop_p
9451 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info
)),
9454 switch (STMT_VINFO_TYPE (stmt_info
))
9456 case type_demotion_vec_info_type
:
9457 case type_promotion_vec_info_type
:
9458 case type_conversion_vec_info_type
:
9459 done
= vectorizable_conversion (stmt
, gsi
, &vec_stmt
, slp_node
);
9463 case induc_vec_info_type
:
9464 done
= vectorizable_induction (stmt
, gsi
, &vec_stmt
, slp_node
);
9468 case shift_vec_info_type
:
9469 done
= vectorizable_shift (stmt
, gsi
, &vec_stmt
, slp_node
);
9473 case op_vec_info_type
:
9474 done
= vectorizable_operation (stmt
, gsi
, &vec_stmt
, slp_node
);
9478 case assignment_vec_info_type
:
9479 done
= vectorizable_assignment (stmt
, gsi
, &vec_stmt
, slp_node
);
9483 case load_vec_info_type
:
9484 done
= vectorizable_load (stmt
, gsi
, &vec_stmt
, slp_node
,
9489 case store_vec_info_type
:
9490 done
= vectorizable_store (stmt
, gsi
, &vec_stmt
, slp_node
);
9492 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
) && !slp_node
)
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
9498 *grouped_store
= true;
9499 stmt_vec_info group_info
9500 = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info
));
9501 if (GROUP_STORE_COUNT (group_info
) == GROUP_SIZE (group_info
))
9508 case condition_vec_info_type
:
9509 done
= vectorizable_condition (stmt
, gsi
, &vec_stmt
, NULL
, 0, slp_node
);
9513 case comparison_vec_info_type
:
9514 done
= vectorizable_comparison (stmt
, gsi
, &vec_stmt
, NULL
, slp_node
);
9518 case call_vec_info_type
:
9519 done
= vectorizable_call (stmt
, gsi
, &vec_stmt
, slp_node
);
9520 stmt
= gsi_stmt (*gsi
);
9523 case call_simd_clone_vec_info_type
:
9524 done
= vectorizable_simd_clone_call (stmt
, gsi
, &vec_stmt
, slp_node
);
9525 stmt
= gsi_stmt (*gsi
);
9528 case reduc_vec_info_type
:
9529 done
= vectorizable_reduction (stmt
, gsi
, &vec_stmt
, slp_node
,
9535 if (!STMT_VINFO_LIVE_P (stmt_info
))
9537 if (dump_enabled_p ())
9538 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9539 "stmt not supported.\n");
9544 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9545 This would break hybrid SLP vectorization. */
9547 gcc_assert (!vec_stmt
9548 && STMT_VINFO_VEC_STMT (stmt_info
) == old_vec_stmt
);
9550 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9551 is being vectorized, but outside the immediately enclosing loop. */
9554 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
9555 && (STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_outer
9556 || STMT_VINFO_RELEVANT (stmt_info
) ==
9557 vect_used_in_outer_by_reduction
))
9559 struct loop
*innerloop
= LOOP_VINFO_LOOP (
9560 STMT_VINFO_LOOP_VINFO (stmt_info
))->inner
;
9561 imm_use_iterator imm_iter
;
9562 use_operand_p use_p
;
9566 if (dump_enabled_p ())
9567 dump_printf_loc (MSG_NOTE
, vect_location
,
9568 "Record the vdef for outer-loop vectorization.\n");
9570 /* Find the relevant loop-exit phi-node, and reord the vec_stmt there
9571 (to be used when vectorizing outer-loop stmts that use the DEF of
9573 if (gimple_code (stmt
) == GIMPLE_PHI
)
9574 scalar_dest
= PHI_RESULT (stmt
);
9576 scalar_dest
= gimple_assign_lhs (stmt
);
9578 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
9580 if (!flow_bb_inside_loop_p (innerloop
, gimple_bb (USE_STMT (use_p
))))
9582 exit_phi
= USE_STMT (use_p
);
9583 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi
)) = vec_stmt
;
9588 /* Handle stmts whose DEF is used outside the loop-nest that is
9589 being vectorized. */
9590 if (STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
9592 done
= can_vectorize_live_stmts (stmt
, gsi
, slp_node
, &vec_stmt
);
9597 STMT_VINFO_VEC_STMT (stmt_info
) = vec_stmt
;
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;
  STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res) = ERROR_MARK;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
9702 /* Free stmt vectorization related info. */
9705 free_stmt_vec_info (gimple
*stmt
)
9707 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
9712 /* Check if this statement has a related "pattern stmt"
9713 (introduced by the vectorizer during the pattern recognition
9714 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
9716 if (STMT_VINFO_IN_PATTERN_P (stmt_info
))
9718 stmt_vec_info patt_info
9719 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
9722 gimple_seq seq
= STMT_VINFO_PATTERN_DEF_SEQ (patt_info
);
9723 gimple
*patt_stmt
= STMT_VINFO_STMT (patt_info
);
9724 gimple_set_bb (patt_stmt
, NULL
);
9725 tree lhs
= gimple_get_lhs (patt_stmt
);
9726 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
)
9727 release_ssa_name (lhs
);
9730 gimple_stmt_iterator si
;
9731 for (si
= gsi_start (seq
); !gsi_end_p (si
); gsi_next (&si
))
9733 gimple
*seq_stmt
= gsi_stmt (si
);
9734 gimple_set_bb (seq_stmt
, NULL
);
9735 lhs
= gimple_get_lhs (seq_stmt
);
9736 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
)
9737 release_ssa_name (lhs
);
9738 free_stmt_vec_info (seq_stmt
);
9741 free_stmt_vec_info (patt_stmt
);
9745 STMT_VINFO_SAME_ALIGN_REFS (stmt_info
).release ();
9746 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).release ();
9747 set_vinfo_for_stmt (stmt
, NULL
);
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */
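/* For example (illustrative only): on a target whose preferred SIMD width
   is 16 bytes, a 4-byte "int" yields a V4SI-mode vector type, whereas
   passing an explicit SIZE of 32 bytes would instead request a V8SI-mode
   type, provided the target supports such a mode.  */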
9758 get_vectype_for_scalar_type_and_size (tree scalar_type
, poly_uint64 size
)
9760 tree orig_scalar_type
= scalar_type
;
9761 scalar_mode inner_mode
;
9762 machine_mode simd_mode
;
9766 if (!is_int_mode (TYPE_MODE (scalar_type
), &inner_mode
)
9767 && !is_float_mode (TYPE_MODE (scalar_type
), &inner_mode
))
9770 unsigned int nbytes
= GET_MODE_SIZE (inner_mode
);
9772 /* For vector types of elements whose mode precision doesn't
9773 match their types precision we use a element type of mode
9774 precision. The vectorization routines will have to make sure
9775 they support the proper result truncation/extension.
9776 We also make sure to build vector types with INTEGER_TYPE
9777 component type only. */
9778 if (INTEGRAL_TYPE_P (scalar_type
)
9779 && (GET_MODE_BITSIZE (inner_mode
) != TYPE_PRECISION (scalar_type
)
9780 || TREE_CODE (scalar_type
) != INTEGER_TYPE
))
9781 scalar_type
= build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode
),
9782 TYPE_UNSIGNED (scalar_type
));
9784 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9785 When the component mode passes the above test simply use a type
9786 corresponding to that mode. The theory is that any use that
9787 would cause problems with this will disable vectorization anyway. */
9788 else if (!SCALAR_FLOAT_TYPE_P (scalar_type
)
9789 && !INTEGRAL_TYPE_P (scalar_type
))
9790 scalar_type
= lang_hooks
.types
.type_for_mode (inner_mode
, 1);
9792 /* We can't build a vector type of elements with alignment bigger than
9794 else if (nbytes
< TYPE_ALIGN_UNIT (scalar_type
))
9795 scalar_type
= lang_hooks
.types
.type_for_mode (inner_mode
,
9796 TYPE_UNSIGNED (scalar_type
));
9798 /* If we felt back to using the mode fail if there was
9799 no scalar type for it. */
9800 if (scalar_type
== NULL_TREE
)
9803 /* If no size was supplied use the mode the target prefers. Otherwise
9804 lookup a vector mode of the specified size. */
9805 if (known_eq (size
, 0U))
9806 simd_mode
= targetm
.vectorize
.preferred_simd_mode (inner_mode
);
9807 else if (!multiple_p (size
, nbytes
, &nunits
)
9808 || !mode_for_vector (inner_mode
, nunits
).exists (&simd_mode
))
9810 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9811 if (!multiple_p (GET_MODE_SIZE (simd_mode
), nbytes
, &nunits
))
9814 vectype
= build_vector_type (scalar_type
, nunits
);
9816 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
9817 && !INTEGRAL_MODE_P (TYPE_MODE (vectype
)))
9820 /* Re-attach the address-space qualifier if we canonicalized the scalar
9822 if (TYPE_ADDR_SPACE (orig_scalar_type
) != TYPE_ADDR_SPACE (vectype
))
9823 return build_qualified_type
9824 (vectype
, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type
)));
poly_uint64 current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && known_eq (current_vector_size, 0U))
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type))
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.

   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
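/* For example (illustrative only): in

       for (i = 0; i < n; i++)
	 a[i] = b[i] * c + 5;

   the literal 5 is classified as vect_constant_def, the loop-invariant
   "c" as vect_external_def, and the value loaded from b[i] (defined by a
   statement inside the loop) as vect_internal_def.  */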
9898 vect_is_simple_use (tree operand
, vec_info
*vinfo
,
9899 gimple
**def_stmt
, enum vect_def_type
*dt
)
9902 *dt
= vect_unknown_def_type
;
9904 if (dump_enabled_p ())
9906 dump_printf_loc (MSG_NOTE
, vect_location
,
9907 "vect_is_simple_use: operand ");
9908 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, operand
);
9909 dump_printf (MSG_NOTE
, "\n");
9912 if (CONSTANT_CLASS_P (operand
))
9914 *dt
= vect_constant_def
;
9918 if (is_gimple_min_invariant (operand
))
9920 *dt
= vect_external_def
;
9924 if (TREE_CODE (operand
) != SSA_NAME
)
9926 if (dump_enabled_p ())
9927 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9932 if (SSA_NAME_IS_DEFAULT_DEF (operand
))
9934 *dt
= vect_external_def
;
9938 *def_stmt
= SSA_NAME_DEF_STMT (operand
);
9939 if (dump_enabled_p ())
9941 dump_printf_loc (MSG_NOTE
, vect_location
, "def_stmt: ");
9942 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, *def_stmt
, 0);
9945 if (! vect_stmt_in_region_p (vinfo
, *def_stmt
))
9946 *dt
= vect_external_def
;
9949 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (*def_stmt
);
9950 *dt
= STMT_VINFO_DEF_TYPE (stmt_vinfo
);
9953 if (dump_enabled_p ())
9955 dump_printf_loc (MSG_NOTE
, vect_location
, "type of def: ");
9958 case vect_uninitialized_def
:
9959 dump_printf (MSG_NOTE
, "uninitialized\n");
9961 case vect_constant_def
:
9962 dump_printf (MSG_NOTE
, "constant\n");
9964 case vect_external_def
:
9965 dump_printf (MSG_NOTE
, "external\n");
9967 case vect_internal_def
:
9968 dump_printf (MSG_NOTE
, "internal\n");
9970 case vect_induction_def
:
9971 dump_printf (MSG_NOTE
, "induction\n");
9973 case vect_reduction_def
:
9974 dump_printf (MSG_NOTE
, "reduction\n");
9976 case vect_double_reduction_def
:
9977 dump_printf (MSG_NOTE
, "double reduction\n");
9979 case vect_nested_cycle
:
9980 dump_printf (MSG_NOTE
, "nested cycle\n");
9982 case vect_unknown_def_type
:
9983 dump_printf (MSG_NOTE
, "unknown\n");
9988 if (*dt
== vect_unknown_def_type
)
9990 if (dump_enabled_p ())
9991 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9992 "Unsupported pattern.\n");
9996 switch (gimple_code (*def_stmt
))
10003 if (dump_enabled_p ())
10004 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
10005 "unsupported defining stmt:\n");
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */
10022 vect_is_simple_use (tree operand
, vec_info
*vinfo
,
10023 gimple
**def_stmt
, enum vect_def_type
*dt
, tree
*vectype
)
10025 if (!vect_is_simple_use (operand
, vinfo
, def_stmt
, dt
))
10028 /* Now get a vector type if the def is internal, otherwise supply
10029 NULL_TREE and leave it up to the caller to figure out a proper
10030 type for the use stmt. */
10031 if (*dt
== vect_internal_def
10032 || *dt
== vect_induction_def
10033 || *dt
== vect_reduction_def
10034 || *dt
== vect_double_reduction_def
10035 || *dt
== vect_nested_cycle
)
10037 stmt_vec_info stmt_info
= vinfo_for_stmt (*def_stmt
);
10039 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
10040 && !STMT_VINFO_RELEVANT (stmt_info
)
10041 && !STMT_VINFO_LIVE_P (stmt_info
))
10042 stmt_info
= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
10044 *vectype
= STMT_VINFO_VECTYPE (stmt_info
);
10045 gcc_assert (*vectype
!= NULL_TREE
);
10047 else if (*dt
== vect_uninitialized_def
10048 || *dt
== vect_constant_def
10049 || *dt
== vect_external_def
)
10050 *vectype
= NULL_TREE
;
10052 gcc_unreachable ();
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */
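/* For example (illustrative only): widening a vector of chars to ints
   needs one intermediate step via shorts; in that case *CODE1/*CODE2
   would be a low/high unpacking pair, *MULTI_STEP_CVT would be 1 and
   INTERM_TYPES would contain the short vector type used in between.  */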
10080 supportable_widening_operation (enum tree_code code
, gimple
*stmt
,
10081 tree vectype_out
, tree vectype_in
,
10082 enum tree_code
*code1
, enum tree_code
*code2
,
10083 int *multi_step_cvt
,
10084 vec
<tree
> *interm_types
)
10086 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
10087 loop_vec_info loop_info
= STMT_VINFO_LOOP_VINFO (stmt_info
);
10088 struct loop
*vect_loop
= NULL
;
10089 machine_mode vec_mode
;
10090 enum insn_code icode1
, icode2
;
10091 optab optab1
, optab2
;
10092 tree vectype
= vectype_in
;
10093 tree wide_vectype
= vectype_out
;
10094 enum tree_code c1
, c2
;
10096 tree prev_type
, intermediate_type
;
10097 machine_mode intermediate_mode
, prev_mode
;
10098 optab optab3
, optab4
;
10100 *multi_step_cvt
= 0;
10102 vect_loop
= LOOP_VINFO_LOOP (loop_info
);
10106 case WIDEN_MULT_EXPR
:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow to change the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
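      /* For example (illustrative only): in the reduction

	     s += (int) a[i] * (int) b[i];

	 only the final sum matters, so producing the partial products in
	 even/odd order instead of low/high order is fine, and the
	 {mult_even,mult_odd} sequence described above can be used.  */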
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check whether
             the vector defined by STMT is only directly used in the
             reduction statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case SAD_EXPR:
      c1 = SAD_EXPR;
      c2 = SAD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from the output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check for the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
                         TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
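  /* Sketch of the loop below for a char->int conversion with 128-bit
     vectors: starting from V16QI, the first unpack step produces V8HI,
     which is not yet the wide V4SI type, so V8HI is recorded as an
     intermediate type; the unpack optabs are then re-queried on V8HI,
     whose result mode matches WIDE_VECTYPE, and the function succeeds
     with *MULTI_STEP_CVT == 1.  (Modes are illustrative only.)  */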
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type = vect_halve_mask_nunits (prev_type);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode,
                                            TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
                             TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}

/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */
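
/* For example, assuming 128-bit vectors, an int->char conversion might have
   VECTYPE_IN = V4SI and VECTYPE_OUT = V16QI: CODE1 would be
   VEC_PACK_TRUNC_EXPR, INTERM_TYPES would hold the short vector type (V8HI)
   and MULTI_STEP_CVT would be 1, matching the int->short->char example
   above.  The exact modes are illustrative and target-dependent.  */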
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from the output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check for the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
                         TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
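  /* For example, on x86 a packed float->signed-int conversion has a direct
     instruction in SSE2, while packed float->unsigned-int generally needs
     AVX-512 or a multi-instruction sequence.  (Example only; the actual
     costs are target-specific.)  */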
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type = vect_double_mask_nunits (prev_type);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
                             TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}

/* Generate and return a statement that sets vector mask MASK such that
   MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I.  */
gcall *
vect_gen_while (tree mask, tree start_index, tree end_index)
{
  tree cmp_type = TREE_TYPE (start_index);
  tree mask_type = TREE_TYPE (mask);
  gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
                                                       cmp_type, mask_type,
                                                       OPTIMIZE_FOR_SPEED));
  gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
                                            start_index, end_index,
                                            build_zero_cst (mask_type));
  gimple_call_set_lhs (call, mask);
  return call;
}

/* Generate a vector mask of type MASK_TYPE for which index I is false iff
   J + START_INDEX < END_INDEX for all J <= I.  Add the statements to SEQ.  */
tree
vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
                    tree end_index)
{
  tree tmp = make_ssa_name (mask_type);
  gcall *call = vect_gen_while (tmp, start_index, end_index);
  gimple_seq_add_stmt (seq, call);
  return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
}