/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-ssa-loop-niter.h"
#include "gimple-fold.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"

/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}

/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}

/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}

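/* For example, the cost modelling below calls

     record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec, stmt_info,
		       0, vect_prologue);

   while collecting costs into a vector: the entry is only queued in
   PROLOGUE_COST_VEC and the return value is the generic
   builtin_vectorization_cost estimate.  Passing a null cost vector instead
   hands the cost straight to the target via add_stmt_cost.  */
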
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}

/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}

/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}

/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}

/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}

/* Function is_simple_and_all_uses_invariant

   Return true if STMT is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (gimple *stmt, loop_vec_info loop_vinfo)
{
  tree op;
  gimple *def_stmt;
  ssa_op_iter iter;

  if (!is_gimple_assign (stmt))
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (dt != vect_external_def && dt != vect_constant_def)
	return false;
    }
  return true;
}

/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt, loop_vinfo))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}

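/* As a small illustration: for a store "a[i] = x" the vdef check above sets
   *RELEVANT to vect_used_in_scope, while for a copy "s_1 = t_2" whose only
   use is in a loop-closed exit phi, *LIVE_P is set and, because t_2 is
   defined inside the loop, the stmt ends up as vect_used_only_live.  */
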
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	{
	  internal_fn ifn = gimple_call_internal_fn (stmt);
	  int mask_index = internal_fn_mask_index (ifn);
	  if (mask_index >= 0
	      && use == gimple_call_arg (stmt, mask_index))
	    return true;
	  int stored_value_index = internal_fn_stored_value_index (ifn);
	  if (stored_value_index >= 0
	      && use == gimple_call_arg (stmt, stored_value_index))
	    return true;
	  if (internal_gather_scatter_fn_p (ifn)
	      && use == gimple_call_arg (stmt, 1))
	    return true;
	}
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}

/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	case vect_used_only_live:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt) == GIMPLE_PHI
	   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
	   && ! STMT_VINFO_LIVE_P (stmt_vinfo)
	   && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
	       == use))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, def_stmt, relevant, false);
  return true;
}

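/* For instance, when the use being processed sits in the inner loop and its
   def comes from the outer loop (case 3a above), a RELEVANT value of
   vect_used_in_outer_by_reduction is translated to vect_used_by_reduction
   before calling vect_mark_relevant, since from the inner loop's point of
   view the def only feeds a reduction; symmetrically, case 3b turns
   vect_used_in_scope into vect_used_in_outer.  */
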
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant according to the relevance property
	 of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
	 propagated as is to the DEF_STMTs of its USEs.

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the relevance to vect_used_by_reduction.
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
	{
	case vect_reduction_def:
	  gcc_assert (relevant != vect_unused_in_scope);
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }
	  break;

	case vect_nested_cycle:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_in_outer_by_reduction
	      && relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }
	  break;

	case vect_double_reduction_def:
	  if (relevant != vect_unused_in_scope
	      && relevant != vect_used_by_reduction
	      && relevant != vect_used_only_live)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  gather_scatter_info gs_info;
	  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
	    gcc_unreachable ();
	  if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}

/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			int ndts,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  gcc_assert (!PURE_SLP_STMT (stmt_info));

  /* Cost the "broadcast" of a scalar operand in to a vector operand.
     Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
     cost model.  */
  for (i = 0; i < ndts; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

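/* Illustrative numbers only: with NCOPIES == 2, NDTS == 2 and one constant
   operand, the loops above record one scalar_to_vec prologue cost and two
   vector_stmt body costs, so a target charging one unit for each would dump
   inside_cost = 2, prologue_cost = 1.  */
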
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  gcc_assert (!PURE_SLP_STMT (stmt_info));

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

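/* As an illustration: for a two-step promotion (PWR == 1) the loop above
   charges vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6 vec_promote_demote
   stmts, whereas a two-step demotion charges vect_pow2 (0) + vect_pow2 (1)
   = 1 + 2 = 3, reflecting that each promotion step doubles the number of
   result vectors.  */
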
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       vect_memory_access_type memory_access_type,
		       vec_load_store_type vls_type, slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  if (vls_type == VLS_STORE_INVARIANT)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (dr, ncopies, &inside_cost, body_cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

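/* For example, a VMAT_CONTIGUOUS_PERMUTE store group with GROUP_SIZE == 4
   and NCOPIES == 1 is charged nstmts = 1 * ceil_log2 (4) * 4 = 8 vec_perm
   operations on top of the cost of the stores themselves.  */
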
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      vect_memory_access_type memory_access_type,
		      slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  gimple *first_stmt = STMT_VINFO_STMT (stmt_info);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * assumed_nunits,
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (dr, ncopies, first_stmt_p,
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}

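/* For example, a VMAT_ELEMENTWISE load with NCOPIES == 2 and an assumed
   vector of four elements is costed as 8 scalar_load operations plus 2
   vec_construct operations to build the vectors from the loaded scalars.  */
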
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}

/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}

/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push sth to a SSA name with initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
		init_stmt = gimple_build_assign (new_temp,
						 fold_build1 (VIEW_CONVERT_EXPR,
							      TREE_TYPE (type),
							      val));
	      else
		init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}

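/* For instance, vectorizing "x + 3" with a V4SI vector type reaches this
   function with VAL = 3 and TYPE = V4SI: the constant is converted to the
   element type, broadcast by build_vector_from_val into {3, 3, 3, 3} and
   assigned to a fresh "cst_" SSA name, inserted on the loop preheader edge
   when GSI is NULL.  */
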
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT of a scalar stmt, return a vector def with type
   DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (gimple *def_stmt, enum vect_def_type dt)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  stmt_vec_info def_stmt_info = NULL;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}

/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
	vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
	       && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
	vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt, dt);
}

/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}

/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}

/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}

/* Helper function called by vect_finish_replace_stmt and
   vect_finish_stmt_generation.  Set the location of the new
   statement and create a stmt_vec_info for it.  */

static void
vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}

/* Replace the scalar statement STMT with a new vector statement VEC_STMT,
   which sets the same scalar result as STMT did.  */

void
vect_finish_replace_stmt (gimple *stmt, gimple *vec_stmt)
{
  gcc_assert (gimple_get_lhs (stmt) == gimple_get_lhs (vec_stmt));

  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gsi_replace (&gsi, vec_stmt, false);

  vect_finish_stmt_generation_1 (stmt, vec_stmt);
}

/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
  vect_finish_stmt_generation_1 (stmt, vec_stmt);
}

/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);

/* Check whether a load or store statement in the loop described by
   LOOP_VINFO is possible in a fully-masked loop.  This is testing
   whether the vectorizer pass has the appropriate support, as well as
   whether the target does.

   VLS_TYPE says whether the statement is a load or store and VECTYPE
   is the type of the vector being loaded or stored.  MEMORY_ACCESS_TYPE
   says how the load or store is going to be implemented and GROUP_SIZE
   is the number of load or store statements in the containing group.
   If the access is a gather load or scatter store, GS_INFO describes
   its arguments.

   Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
   supported, otherwise record the required mask types.  */

static void
check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
			  vec_load_store_type vls_type, int group_size,
			  vect_memory_access_type memory_access_type,
			  gather_scatter_info *gs_info)
{
  /* Invariant loads need no special support.  */
  if (memory_access_type == VMAT_INVARIANT)
    return;

  vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
  machine_mode vecmode = TYPE_MODE (vectype);
  bool is_load = (vls_type == VLS_LOAD);
  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    {
      if (is_load
	  ? !vect_load_lanes_supported (vectype, group_size, true)
	  : !vect_store_lanes_supported (vectype, group_size, true))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "can't use a fully-masked loop because the"
			     " target doesn't have an appropriate masked"
			     " load/store-lanes instruction.\n");
	  LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	  return;
	}
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      internal_fn ifn = (is_load
			 ? IFN_MASK_GATHER_LOAD
			 : IFN_MASK_SCATTER_STORE);
      tree offset_type = TREE_TYPE (gs_info->offset);
      if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
						   gs_info->memory_type,
						   TYPE_SIGN (offset_type),
						   gs_info->scale))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "can't use a fully-masked loop because the"
			     " target doesn't have an appropriate masked"
			     " gather load or scatter store instruction.\n");
	  LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
	  return;
	}
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type != VMAT_CONTIGUOUS
      && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Element X of the data must come from iteration i * VF + X of the
	 scalar loop.  We need more work to support other mappings.  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "can't use a fully-masked loop because an access"
			 " isn't contiguous.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }

  machine_mode mask_mode;
  if (!(targetm.vectorize.get_mask_mode
	(GET_MODE_NUNITS (vecmode),
	 GET_MODE_SIZE (vecmode)).exists (&mask_mode))
      || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "can't use a fully-masked loop because the target"
			 " doesn't have the appropriate masked load or"
			 " store.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }
  /* We might load more scalars than we need for permuting SLP loads.
     We checked in get_group_load_store_type that the extra elements
     don't leak into a new vector.  */
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int nvectors;
  if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
    vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
  else
    gcc_unreachable ();
}

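/* For example, a contiguous group of GROUP_SIZE == 2 loads with a
   vectorization factor of 4 and 8-element vectors gives
   group_size * vf == 8, which divides nunits exactly, so one mask
   (nvectors == 1) is recorded via vect_record_loop_mask.  */
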
/* Return the mask input to a masked load or store.  VEC_MASK is the vectorized
   form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
   that needs to be applied to all loads and stores in a vectorized loop.
   Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.

   MASK_TYPE is the type of both masks.  If new statements are needed,
   insert them before GSI.  */

static tree
prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
			 gimple_stmt_iterator *gsi)
{
  gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
  if (!loop_mask)
    return vec_mask;

  gcc_assert (TREE_TYPE (loop_mask) == mask_type);
  tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
  gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
					  vec_mask, loop_mask);
  gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
  return and_res;
}

/* Determine whether we can use a gather load or scatter store to vectorize
   strided load or store STMT by truncating the current offset to a smaller
   width.  We need to be able to construct an offset vector:

     { 0, X, X*2, X*3, ... }

   without loss of precision, where X is STMT's DR_STEP.

   Return true if this is possible, describing the gather load or scatter
   store in GS_INFO.  MASKED_P is true if the load or store is conditional.  */

static bool
vect_truncate_gather_scatter_offset (gimple *stmt, loop_vec_info loop_vinfo,
				     bool masked_p,
				     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree step = DR_STEP (dr);
  if (TREE_CODE (step) != INTEGER_CST)
    {
      /* ??? Perhaps we could use range information here?  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "cannot truncate variable step.\n");
      return false;
    }

  /* Get the number of bits in an element.  */
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);

  /* Set COUNT to the upper limit on the number of elements - 1.
     Start with the maximum vectorization factor.  */
  unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;

  /* Try lowering COUNT to the number of scalar latch iterations.  */
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  widest_int max_iters;
  if (max_loop_iterations (loop, &max_iters)
      && max_iters < count)
    count = max_iters.to_shwi ();

  /* Try scales of 1 and the element size.  */
  int scales[] = { 1, vect_get_scalar_dr_size (dr) };
  bool overflow_p = false;
  for (int i = 0; i < 2; ++i)
    {
      int scale = scales[i];
      widest_int factor;
      if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
	continue;

      /* See whether we can calculate (COUNT - 1) * STEP / SCALE
	 in OFFSET_BITS bits.  */
      widest_int range = wi::mul (count, factor, SIGNED, &overflow_p);
      if (overflow_p)
	continue;
      signop sign = range >= 0 ? UNSIGNED : SIGNED;
      if (wi::min_precision (range, sign) > element_bits)
	{
	  overflow_p = true;
	  continue;
	}

      /* See whether the target supports the operation.  */
      tree memory_type = TREE_TYPE (DR_REF (dr));
      if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
				     memory_type, element_bits, sign, scale,
				     &gs_info->ifn, &gs_info->element_type))
	continue;

      tree offset_type = build_nonstandard_integer_type (element_bits,
							  sign == UNSIGNED);

      gs_info->decl = NULL_TREE;
      /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
	 but we don't need to store that here.  */
      gs_info->base = NULL_TREE;
      gs_info->offset = fold_convert (offset_type, step);
      gs_info->offset_dt = vect_constant_def;
      gs_info->offset_vectype = NULL_TREE;
      gs_info->scale = scale;
      gs_info->memory_type = memory_type;
      return true;
    }

  if (overflow_p && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "truncating gather/scatter offset to %d bits"
		     " might change its value.\n", element_bits);

  return false;
}
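
/* Worked example (illustrative, not part of the original sources): for a
   strided access with DR_STEP == 16 over 32-bit elements and COUNT bounded
   by 255, the loop above first tries SCALE == 1, giving offsets
   { 0, 16, 32, ... } with a maximum of 255 * 16 == 4080, which easily fits
   in 32 bits; if the target only supports the element-sized scale it falls
   back to SCALE == 4 with offsets { 0, 4, 8, ... }.  */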
/* Return true if we can use gather/scatter internal functions to
   vectorize STMT, which is a grouped or strided load or store.
   MASKED_P is true if load or store is conditional.  When returning
   true, fill in GS_INFO with the information required to perform the
   operation.  */

static bool
vect_use_strided_gather_scatters_p (gimple *stmt, loop_vec_info loop_vinfo,
				    bool masked_p,
				    gather_scatter_info *gs_info)
{
  if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info)
      || gs_info->decl)
    return vect_truncate_gather_scatter_offset (stmt, loop_vinfo,
						masked_p, gs_info);

  scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
  tree offset_type = TREE_TYPE (gs_info->offset);
  unsigned int offset_bits = TYPE_PRECISION (offset_type);

  /* Enforced by vect_check_gather_scatter.  */
  gcc_assert (element_bits >= offset_bits);

  /* If the elements are wider than the offset, convert the offset to the
     same width, without changing its sign.  */
  if (element_bits > offset_bits)
    {
      bool unsigned_p = TYPE_UNSIGNED (offset_type);
      offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
      gs_info->offset = fold_convert (offset_type, gs_info->offset);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "using gather/scatter for strided/grouped access,"
		     " scale = %d\n", gs_info->scale);

  return true;
}
/* STMT is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr)->step,
			       size_zero_node);
}
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* The encoding has a single stepped pattern.  */
  vec_perm_builder sel (nunits, 1, 3);
  for (int i = 0; i < 3; ++i)
    sel.quick_push (nunits - 1 - i);

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}
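
/* Illustrative example (editorial note): for a four-element vector the three
   indices pushed above are { 3, 2, 1 }, and the single stepped pattern
   extends them to the full selector { 3, 2, 1, 0 }, which reverses the
   vector when used as a VEC_PERM_EXPR mask.  */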
/* STMT is either a masked or unconditional store.  Return the value
   being stored.  */

tree
vect_get_store_rhs (gimple *stmt)
{
  if (gassign *assign = dyn_cast <gassign *> (stmt))
    {
      gcc_assert (gimple_assign_single_p (assign));
      return gimple_assign_rhs1 (assign);
    }
  if (gcall *call = dyn_cast <gcall *> (stmt))
    {
      internal_fn ifn = gimple_call_internal_fn (call);
      int index = internal_fn_stored_value_index (ifn);
      gcc_assert (index >= 0);
      return gimple_call_arg (stmt, index);
    }
  gcc_unreachable ();
}
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
			   bool masked_p, vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type,
			   gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
  bool single_element_p = (stmt == first_stmt
			   && !GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (!masked_p
			&& vls_type == VLS_LOAD
			&& loop_vinfo
			&& !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);

  if (slp)
    {
      if (STMT_VINFO_STRIDED_P (stmt_info))
	{
	  /* Try to use consecutive accesses of GROUP_SIZE elements,
	     separated by the stride, until we have a complete vector.
	     Fall back to scalar accesses if that isn't possible.  */
	  if (multiple_p (nunits, group_size))
	    *memory_access_type = VMAT_STRIDED_SLP;
	  else
	    *memory_access_type = VMAT_ELEMENTWISE;
	}
      else
	{
	  overrun_p = loop_vinfo && gap != 0;
	  if (overrun_p && vls_type != VLS_LOAD)
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "Grouped store with gaps requires"
			       " non-consecutive accesses\n");
	      return false;
	    }
	  /* An overrun is fine if the trailing elements are smaller
	     than the alignment boundary B.  Every vector access will
	     be a multiple of B and so we are guaranteed to access a
	     non-gap element in the same B-sized block.  */
	  if (overrun_p
	      && gap < (vect_known_alignment_in_bytes (first_dr)
			/ vect_get_scalar_dr_size (first_dr)))
	    overrun_p = false;
	  if (overrun_p && !can_overrun_p)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }
	  *memory_access_type = VMAT_CONTIGUOUS;
	}
    }
  else
    {
      /* We can always handle this case using elementwise accesses,
	 but see if something more efficient is available.  */
      *memory_access_type = VMAT_ELEMENTWISE;

      /* If there is a gap at the end of the group then these optimizations
	 would access excess elements in the last iteration.  */
      bool would_overrun_p = (gap != 0);
      /* An overrun is fine if the trailing elements are smaller than the
	 alignment boundary B.  Every vector access will be a multiple of B
	 and so we are guaranteed to access a non-gap element in the
	 same B-sized block.  */
      if (would_overrun_p
	  && !masked_p
	  && gap < (vect_known_alignment_in_bytes (first_dr)
		    / vect_get_scalar_dr_size (first_dr)))
	would_overrun_p = false;

      if (!STMT_VINFO_STRIDED_P (stmt_info)
	  && (can_overrun_p || !would_overrun_p)
	  && compare_step_with_zero (stmt) > 0)
	{
	  /* First cope with the degenerate case of a single-element
	     vector.  */
	  if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
	    *memory_access_type = VMAT_CONTIGUOUS;

	  /* Otherwise try using LOAD/STORE_LANES.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_load_lanes_supported (vectype, group_size, masked_p)
		  : vect_store_lanes_supported (vectype, group_size,
						masked_p)))
	    {
	      *memory_access_type = VMAT_LOAD_STORE_LANES;
	      overrun_p = would_overrun_p;
	    }

	  /* If that fails, try using permuting loads.  */
	  if (*memory_access_type == VMAT_ELEMENTWISE
	      && (vls_type == VLS_LOAD
		  ? vect_grouped_load_supported (vectype, single_element_p,
						 group_size)
		  : vect_grouped_store_supported (vectype, group_size)))
	    {
	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
	      overrun_p = would_overrun_p;
	    }
	}

      /* As a last resort, try using a gather load or scatter store.

	 ??? Although the code can handle all group sizes correctly,
	 it probably isn't a win to use separate strided accesses based
	 on nearby locations.  Or, even if it's a win over scalar code,
	 it might not be a win over vectorizing at a lower VF, if that
	 allows us to use contiguous accesses.  */
      if (*memory_access_type == VMAT_ELEMENTWISE
	  && single_element_p
	  && loop_vinfo
	  && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
						 masked_p, gs_info))
	*memory_access_type = VMAT_GATHER_SCATTER;
    }

  if (vls_type != VLS_LOAD && first_stmt == stmt)
    {
      /* STMT is the leader of the group.  Check the operands of all the
	 stmts of the group.  */
      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt)
	{
	  tree op = vect_get_store_rhs (next_stmt);
	  gimple *def_stmt;
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (gimple *stmt, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
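
/* Illustrative note (editorial): an access classified as
   VMAT_CONTIGUOUS_REVERSE is later vectorized as a contiguous load or store
   of the neighbouring vector combined with a reversing VEC_PERM_EXPR that
   uses the mask returned by perm_mask_for_reverse.  */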
/* Analyze load or store statement STMT of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   MASKED_P is true if the statement is conditional on a vectorized mask.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
		     vec_load_store_type vls_type, unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      gimple *def_stmt;
      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo, &def_stmt,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt, vectype, slp, masked_p, vls_type,
				      memory_access_type, gs_info))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      if (loop_vinfo
	  && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
						 masked_p, gs_info))
	*memory_access_type = VMAT_GATHER_SCATTER;
      else
	*memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  if ((*memory_access_type == VMAT_ELEMENTWISE
       || *memory_access_type == VMAT_STRIDED_SLP)
      && !nunits.is_constant ())
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Not using elementwise accesses due to variable "
			 "vectorization factor.\n");
      return false;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (stmt_info)
      && !(stmt == GROUP_FIRST_ELEMENT (stmt_info)
	   && !GROUP_NEXT_ELEMENT (stmt_info)
	   && !pow2p_hwi (GROUP_SIZE (stmt_info))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }

  return true;
}
/* Return true if boolean argument MASK is suitable for vectorizing
   conditional load or store STMT.  When returning true, store the type
   of the definition in *MASK_DT_OUT and the type of the vectorized mask
   in *MASK_VECTYPE_OUT.  */

static bool
vect_check_load_store_mask (gimple *stmt, tree mask,
			    vect_def_type *mask_dt_out,
			    tree *mask_vectype_out)
{
  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not a boolean.\n");
      return false;
    }

  if (TREE_CODE (mask) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not an SSA name.\n");
      return false;
    }

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple *def_stmt;
  enum vect_def_type mask_dt;
  tree mask_vectype;
  if (!vect_is_simple_use (mask, stmt_info->vinfo, &def_stmt, &mask_dt,
			   &mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask use not simple.\n");
      return false;
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "could not find an appropriate vector mask type.\n");
      return false;
    }

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
		TYPE_VECTOR_SUBPARTS (vectype)))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vector mask type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION,
		       " does not match vector data type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
	}
      return false;
    }

  *mask_dt_out = mask_dt;
  *mask_vectype_out = mask_vectype;
  return true;
}
/* Return true if stored value RHS is suitable for vectorizing store
   statement STMT.  When returning true, store the type of the
   definition in *RHS_DT_OUT, the type of the vectorized store value in
   *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT.  */

static bool
vect_check_store_rhs (gimple *stmt, tree rhs, vect_def_type *rhs_dt_out,
		      tree *rhs_vectype_out, vec_load_store_type *vls_type_out)
{
  /* In the case this is a store from a constant make sure
     native_encode_expr can handle it.  */
  if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cannot encode constant as a byte sequence.\n");
      return false;
    }

  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  gimple *def_stmt;
  enum vect_def_type rhs_dt;
  tree rhs_vectype;
  if (!vect_is_simple_use (rhs, stmt_info->vinfo, &def_stmt, &rhs_dt,
			   &rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "incompatible vector types.\n");
      return false;
    }

  *rhs_dt_out = rhs_dt;
  *rhs_vectype_out = rhs_vectype;
  if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
    *vls_type_out = VLS_STORE_INVARIANT;
  else
    *vls_type_out = VLS_STORE;
  return true;
}
/* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT.
   Note that we support masks with floating-point type, in which case the
   floats are interpreted as a bitmask.  */

static tree
vect_build_all_ones_mask (gimple *stmt, tree masktype)
{
  if (TREE_CODE (masktype) == INTEGER_TYPE)
    return build_int_cst (masktype, -1);
  else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
    {
      tree mask = build_int_cst (TREE_TYPE (masktype), -1);
      mask = build_vector_from_val (masktype, mask);
      return vect_init_vector (stmt, mask, masktype, NULL);
    }
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
    {
      REAL_VALUE_TYPE r;
      long tmp[6];
      for (int j = 0; j < 6; ++j)
	tmp[j] = -1;
      real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
      tree mask = build_real (TREE_TYPE (masktype), r);
      mask = build_vector_from_val (masktype, mask);
      return vect_init_vector (stmt, mask, masktype, NULL);
    }
  gcc_unreachable ();
}

/* Build an all-zero merge value of type VECTYPE while vectorizing
   STMT as a gather load.  */

static tree
vect_build_zero_merge_argument (gimple *stmt, tree vectype)
{
  tree merge;
  if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
    merge = build_int_cst (TREE_TYPE (vectype), 0);
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
    {
      REAL_VALUE_TYPE r;
      long tmp[6];
      for (int j = 0; j < 6; ++j)
	tmp[j] = 0;
      real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
      merge = build_real (TREE_TYPE (vectype), r);
    }
  else
    gcc_unreachable ();
  merge = build_vector_from_val (vectype, merge);
  return vect_init_vector (stmt, merge, vectype, NULL);
}
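
/* Illustrative note (editorial): for a floating-point MASKTYPE such as V4SF
   the all-ones value built above is the float whose bit pattern is all ones;
   it is never used arithmetically, only reinterpreted as a bitmask by the
   gather/scatter builtins.  */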
/* Build a gather load call while vectorizing STMT.  Insert new instructions
   before GSI and add them to VEC_STMT.  GS_INFO describes the gather load
   operation.  If the load is conditional, MASK is the unvectorized
   condition and MASK_DT is its definition type, otherwise MASK is null.  */
2555 vect_build_gather_load_calls (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
2556 gimple
**vec_stmt
, gather_scatter_info
*gs_info
,
2557 tree mask
, vect_def_type mask_dt
)
2559 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2560 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2561 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2562 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2563 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
2564 int ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
2565 edge pe
= loop_preheader_edge (loop
);
2566 enum { NARROW
, NONE
, WIDEN
} modifier
;
2567 poly_uint64 gather_off_nunits
2568 = TYPE_VECTOR_SUBPARTS (gs_info
->offset_vectype
);
2570 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
->decl
));
2571 tree rettype
= TREE_TYPE (TREE_TYPE (gs_info
->decl
));
2572 tree srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2573 tree ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2574 tree idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2575 tree masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2576 tree scaletype
= TREE_VALUE (arglist
);
2577 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
2578 && (!mask
|| types_compatible_p (srctype
, masktype
)));
2580 tree perm_mask
= NULL_TREE
;
2581 tree mask_perm_mask
= NULL_TREE
;
2582 if (known_eq (nunits
, gather_off_nunits
))
2584 else if (known_eq (nunits
* 2, gather_off_nunits
))
2588 /* Currently widening gathers and scatters are only supported for
2589 fixed-length vectors. */
2590 int count
= gather_off_nunits
.to_constant ();
2591 vec_perm_builder
sel (count
, count
, 1);
2592 for (int i
= 0; i
< count
; ++i
)
2593 sel
.quick_push (i
| (count
/ 2));
2595 vec_perm_indices
indices (sel
, 1, count
);
2596 perm_mask
= vect_gen_perm_mask_checked (gs_info
->offset_vectype
,
2599 else if (known_eq (nunits
, gather_off_nunits
* 2))
2603 /* Currently narrowing gathers and scatters are only supported for
2604 fixed-length vectors. */
2605 int count
= nunits
.to_constant ();
2606 vec_perm_builder
sel (count
, count
, 1);
2607 sel
.quick_grow (count
);
2608 for (int i
= 0; i
< count
; ++i
)
2609 sel
[i
] = i
< count
/ 2 ? i
: i
+ count
/ 2;
2610 vec_perm_indices
indices (sel
, 2, count
);
2611 perm_mask
= vect_gen_perm_mask_checked (vectype
, indices
);
2617 for (int i
= 0; i
< count
; ++i
)
2618 sel
[i
] = i
| (count
/ 2);
2619 indices
.new_vector (sel
, 2, count
);
2620 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, indices
);
2626 tree vec_dest
= vect_create_destination_var (gimple_get_lhs (stmt
),
2629 tree ptr
= fold_convert (ptrtype
, gs_info
->base
);
2630 if (!is_gimple_min_invariant (ptr
))
2633 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
2634 basic_block new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
2635 gcc_assert (!new_bb
);
2638 tree scale
= build_int_cst (scaletype
, gs_info
->scale
);
2640 tree vec_oprnd0
= NULL_TREE
;
2641 tree vec_mask
= NULL_TREE
;
2642 tree src_op
= NULL_TREE
;
2643 tree mask_op
= NULL_TREE
;
2644 tree prev_res
= NULL_TREE
;
2645 stmt_vec_info prev_stmt_info
= NULL
;
2649 src_op
= vect_build_zero_merge_argument (stmt
, rettype
);
2650 mask_op
= vect_build_all_ones_mask (stmt
, masktype
);
2653 for (int j
= 0; j
< ncopies
; ++j
)
2657 if (modifier
== WIDEN
&& (j
& 1))
2658 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
2659 perm_mask
, stmt
, gsi
);
2662 = vect_get_vec_def_for_operand (gs_info
->offset
, stmt
);
2665 = vect_get_vec_def_for_stmt_copy (gs_info
->offset_dt
, vec_oprnd0
);
2667 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
2669 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
)),
2670 TYPE_VECTOR_SUBPARTS (idxtype
)));
2671 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
2672 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
2673 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2674 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2680 if (mask_perm_mask
&& (j
& 1))
2681 mask_op
= permute_vec_elements (mask_op
, mask_op
,
2682 mask_perm_mask
, stmt
, gsi
);
2686 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
2688 vec_mask
= vect_get_vec_def_for_stmt_copy (mask_dt
, vec_mask
);
2691 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
2694 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
)),
2695 TYPE_VECTOR_SUBPARTS (masktype
)));
2696 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
2697 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
2698 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
,
2700 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2707 new_stmt
= gimple_build_call (gs_info
->decl
, 5, src_op
, ptr
, op
,
2710 if (!useless_type_conversion_p (vectype
, rettype
))
2712 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype
),
2713 TYPE_VECTOR_SUBPARTS (rettype
)));
2714 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
2715 gimple_call_set_lhs (new_stmt
, op
);
2716 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2717 var
= make_ssa_name (vec_dest
);
2718 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
2719 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2723 var
= make_ssa_name (vec_dest
, new_stmt
);
2724 gimple_call_set_lhs (new_stmt
, var
);
2727 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2729 if (modifier
== NARROW
)
2736 var
= permute_vec_elements (prev_res
, var
, perm_mask
, stmt
, gsi
);
2737 new_stmt
= SSA_NAME_DEF_STMT (var
);
2740 if (prev_stmt_info
== NULL
)
2741 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2743 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2744 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
/* Prepare the base and offset in GS_INFO for vectorization.
   Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
   to the vectorized offset argument for the first copy of STMT.  STMT
   is the statement described by GS_INFO and LOOP is the containing loop.  */

static void
vect_get_gather_scatter_ops (struct loop *loop, gimple *stmt,
			     gather_scatter_info *gs_info,
			     tree *dataref_ptr, tree *vec_offset)
{
  gimple_seq stmts = NULL;
  *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
  if (stmts != NULL)
    {
      basic_block new_bb;
      edge pe = loop_preheader_edge (loop);
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }
  tree offset_type = TREE_TYPE (gs_info->offset);
  tree offset_vectype = get_vectype_for_scalar_type (offset_type);
  *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt,
					      offset_vectype);
}

/* Prepare to implement a grouped or strided load or store using
   the gather load or scatter store operation described by GS_INFO.
   STMT is the load or store statement.

   Set *DATAREF_BUMP to the amount that should be added to the base
   address after each copy of the vectorized statement.  Set *VEC_OFFSET
   to an invariant offset vector in which element I has the value
   I * DR_STEP / SCALE.  */

static void
vect_get_strided_load_store_ops (gimple *stmt, loop_vec_info loop_vinfo,
				 gather_scatter_info *gs_info,
				 tree *dataref_bump, tree *vec_offset)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  gimple_seq stmts;

  tree bump = size_binop (MULT_EXPR,
			  fold_convert (sizetype, DR_STEP (dr)),
			  size_int (TYPE_VECTOR_SUBPARTS (vectype)));
  *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  /* The offset given in GS_INFO can have pointer type, so use the element
     type of the vector instead.  */
  tree offset_type = TREE_TYPE (gs_info->offset);
  tree offset_vectype = get_vectype_for_scalar_type (offset_type);
  offset_type = TREE_TYPE (offset_vectype);

  /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type.  */
  tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
			  ssize_int (gs_info->scale));
  step = fold_convert (offset_type, step);
  step = force_gimple_operand (step, &stmts, true, NULL_TREE);

  /* Create {0, X, X*2, X*3, ...}.  */
  *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
			      build_zero_cst (offset_type), step);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
}

/* Return the amount that should be added to a vector pointer to move
   to the next or previous copy of AGGR_TYPE.  DR is the data reference
   being vectorized and MEMORY_ACCESS_TYPE describes the type of
   vectorization.  */

static tree
vect_get_data_ptr_increment (data_reference *dr, tree aggr_type,
			     vect_memory_access_type memory_access_type)
{
  if (memory_access_type == VMAT_INVARIANT)
    return size_zero_node;

  tree iv_step = TYPE_SIZE_UNIT (aggr_type);
  tree step = vect_dr_behavior (dr)->step;
  if (tree_int_cst_sgn (step) == -1)
    iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
  return iv_step;
}
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}.  */
2841 vectorizable_bswap (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
2842 gimple
**vec_stmt
, slp_tree slp_node
,
2843 tree vectype_in
, enum vect_def_type
*dt
)
2846 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
2847 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2849 unsigned HOST_WIDE_INT nunits
, num_bytes
;
2851 op
= gimple_call_arg (stmt
, 0);
2852 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2854 if (!TYPE_VECTOR_SUBPARTS (vectype
).is_constant (&nunits
))
2857 /* Multiple types in SLP are handled by creating the appropriate number of
2858 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2863 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
2865 gcc_assert (ncopies
>= 1);
2867 tree char_vectype
= get_same_sized_vectype (char_type_node
, vectype_in
);
2871 if (!TYPE_VECTOR_SUBPARTS (char_vectype
).is_constant (&num_bytes
))
2874 unsigned word_bytes
= num_bytes
/ nunits
;
2876 /* The encoding uses one stepped pattern for each byte in the word. */
2877 vec_perm_builder
elts (num_bytes
, word_bytes
, 3);
2878 for (unsigned i
= 0; i
< 3; ++i
)
2879 for (unsigned j
= 0; j
< word_bytes
; ++j
)
2880 elts
.quick_push ((i
+ 1) * word_bytes
- j
- 1);
2882 vec_perm_indices
indices (elts
, 1, num_bytes
);
2883 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype
), indices
))
2888 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2889 if (dump_enabled_p ())
2890 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_bswap ==="
2894 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
,
2895 1, vector_stmt
, stmt_info
, 0, vect_prologue
);
2896 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
,
2897 ncopies
, vec_perm
, stmt_info
, 0, vect_body
);
2902 tree bswap_vconst
= vec_perm_indices_to_tree (char_vectype
, indices
);
2905 vec
<tree
> vec_oprnds
= vNULL
;
2906 gimple
*new_stmt
= NULL
;
2907 stmt_vec_info prev_stmt_info
= NULL
;
2908 for (unsigned j
= 0; j
< ncopies
; j
++)
2912 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
);
2914 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
2916 /* Arguments are ready. create the new vector stmt. */
2919 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
2921 tree tem
= make_ssa_name (char_vectype
);
2922 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
2923 char_vectype
, vop
));
2924 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2925 tree tem2
= make_ssa_name (char_vectype
);
2926 new_stmt
= gimple_build_assign (tem2
, VEC_PERM_EXPR
,
2927 tem
, tem
, bswap_vconst
);
2928 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2929 tem
= make_ssa_name (vectype
);
2930 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
2932 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2934 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2941 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2943 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2945 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
2948 vec_oprnds
.release ();
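
/* Illustrative example (editorial note): for __builtin_bswap32 on a V4SI
   vector the V16QI selector built by the stepped pattern above is
   { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 }, i.e. each
   4-byte word is reversed in place by the VEC_PERM_EXPR.  */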
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
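
/* Illustrative example (editorial note): narrowing V4SI inputs to a V8HI
   output succeeds in a single step when the target provides a vector pack
   pattern for that mode pair, and that pack code is what ends up in
   *CONVERT_CODE; a narrowing that would need an intermediate type makes
   MULTI_STEP_CVT nonzero and is rejected.  */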
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
2986 vectorizable_call (gimple
*gs
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
2993 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
2994 stmt_vec_info stmt_info
= vinfo_for_stmt (gs
), prev_stmt_info
;
2995 tree vectype_out
, vectype_in
;
2996 poly_uint64 nunits_in
;
2997 poly_uint64 nunits_out
;
2998 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2999 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3000 vec_info
*vinfo
= stmt_info
->vinfo
;
3001 tree fndecl
, new_temp
, rhs_type
;
3003 enum vect_def_type dt
[3]
3004 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
3006 gimple
*new_stmt
= NULL
;
3008 vec
<tree
> vargs
= vNULL
;
3009 enum { NARROW
, NONE
, WIDEN
} modifier
;
3013 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3016 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3020 /* Is GS a vectorizable call? */
3021 stmt
= dyn_cast
<gcall
*> (gs
);
3025 if (gimple_call_internal_p (stmt
)
3026 && (internal_load_fn_p (gimple_call_internal_fn (stmt
))
3027 || internal_store_fn_p (gimple_call_internal_fn (stmt
))))
3028 /* Handled by vectorizable_load and vectorizable_store. */
3031 if (gimple_call_lhs (stmt
) == NULL_TREE
3032 || TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3035 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3037 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
3039 /* Process function arguments. */
3040 rhs_type
= NULL_TREE
;
3041 vectype_in
= NULL_TREE
;
3042 nargs
= gimple_call_num_args (stmt
);
3044 /* Bail out if the function has more than three arguments, we do not have
3045 interesting builtin functions to vectorize with more than two arguments
3046 except for fma. No arguments is also not good. */
3047 if (nargs
== 0 || nargs
> 3)
3050 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3051 if (gimple_call_internal_p (stmt
)
3052 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
3055 rhs_type
= unsigned_type_node
;
3058 for (i
= 0; i
< nargs
; i
++)
3062 op
= gimple_call_arg (stmt
, i
);
3064 /* We can only handle calls with arguments of the same type. */
3066 && !types_compatible_p (rhs_type
, TREE_TYPE (op
)))
3068 if (dump_enabled_p ())
3069 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3070 "argument types differ.\n");
3074 rhs_type
= TREE_TYPE (op
);
3076 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[i
], &opvectype
))
3078 if (dump_enabled_p ())
3079 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3080 "use not simple.\n");
3085 vectype_in
= opvectype
;
3087 && opvectype
!= vectype_in
)
3089 if (dump_enabled_p ())
3090 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3091 "argument vector types differ.\n");
3095 /* If all arguments are external or constant defs use a vector type with
3096 the same size as the output vector type. */
3098 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
3100 gcc_assert (vectype_in
);
3103 if (dump_enabled_p ())
3105 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3106 "no vectype for scalar type ");
3107 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3108 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3115 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
3116 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
3117 if (known_eq (nunits_in
* 2, nunits_out
))
3119 else if (known_eq (nunits_out
, nunits_in
))
3121 else if (known_eq (nunits_out
* 2, nunits_in
))
3126 /* We only handle functions that do not read or clobber memory. */
3127 if (gimple_vuse (stmt
))
3129 if (dump_enabled_p ())
3130 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3131 "function reads from or writes to memory.\n");
3135 /* For now, we only vectorize functions if a target specific builtin
3136 is available. TODO -- in some cases, it might be profitable to
3137 insert the calls for pieces of the vector, in order to be able
3138 to vectorize other operations in the loop. */
3140 internal_fn ifn
= IFN_LAST
;
3141 combined_fn cfn
= gimple_call_combined_fn (stmt
);
3142 tree callee
= gimple_call_fndecl (stmt
);
3144 /* First try using an internal function. */
3145 tree_code convert_code
= ERROR_MARK
;
3147 && (modifier
== NONE
3148 || (modifier
== NARROW
3149 && simple_integer_narrowing (vectype_out
, vectype_in
,
3151 ifn
= vectorizable_internal_function (cfn
, callee
, vectype_out
,
3154 /* If that fails, try asking for a target-specific built-in function. */
3155 if (ifn
== IFN_LAST
)
3157 if (cfn
!= CFN_LAST
)
3158 fndecl
= targetm
.vectorize
.builtin_vectorized_function
3159 (cfn
, vectype_out
, vectype_in
);
3161 fndecl
= targetm
.vectorize
.builtin_md_vectorized_function
3162 (callee
, vectype_out
, vectype_in
);
3165 if (ifn
== IFN_LAST
&& !fndecl
)
3167 if (cfn
== CFN_GOMP_SIMD_LANE
3170 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
3171 && TREE_CODE (gimple_call_arg (stmt
, 0)) == SSA_NAME
3172 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
3173 == SSA_NAME_VAR (gimple_call_arg (stmt
, 0)))
3175 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3176 { 0, 1, 2, ... vf - 1 } vector. */
3177 gcc_assert (nargs
== 0);
3179 else if (modifier
== NONE
3180 && (gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP16
)
3181 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP32
)
3182 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP64
)))
3183 return vectorizable_bswap (stmt
, gsi
, vec_stmt
, slp_node
,
3187 if (dump_enabled_p ())
3188 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3189 "function is not vectorizable.\n");
3196 else if (modifier
== NARROW
&& ifn
== IFN_LAST
)
3197 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_out
);
3199 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
3201 /* Sanity check: make sure that at least one copy of the vectorized stmt
3202 needs to be generated. */
3203 gcc_assert (ncopies
>= 1);
3205 if (!vec_stmt
) /* transformation not required. */
3207 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
3208 if (dump_enabled_p ())
3209 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_call ==="
3213 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
3214 if (ifn
!= IFN_LAST
&& modifier
== NARROW
&& !slp_node
)
3215 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
, ncopies
/ 2,
3216 vec_promote_demote
, stmt_info
, 0, vect_body
);
3224 if (dump_enabled_p ())
3225 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
3228 scalar_dest
= gimple_call_lhs (stmt
);
3229 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
3231 prev_stmt_info
= NULL
;
3232 if (modifier
== NONE
|| ifn
!= IFN_LAST
)
3234 tree prev_res
= NULL_TREE
;
3235 for (j
= 0; j
< ncopies
; ++j
)
3237 /* Build argument list for the vectorized call. */
3239 vargs
.create (nargs
);
3245 auto_vec
<vec
<tree
> > vec_defs (nargs
);
3246 vec
<tree
> vec_oprnds0
;
3248 for (i
= 0; i
< nargs
; i
++)
3249 vargs
.quick_push (gimple_call_arg (stmt
, i
));
3250 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3251 vec_oprnds0
= vec_defs
[0];
3253 /* Arguments are ready. Create the new vector stmt. */
3254 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_oprnd0
)
3257 for (k
= 0; k
< nargs
; k
++)
3259 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3260 vargs
[k
] = vec_oprndsk
[i
];
3262 if (modifier
== NARROW
)
3264 tree half_res
= make_ssa_name (vectype_in
);
3266 = gimple_build_call_internal_vec (ifn
, vargs
);
3267 gimple_call_set_lhs (call
, half_res
);
3268 gimple_call_set_nothrow (call
, true);
3270 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3273 prev_res
= half_res
;
3276 new_temp
= make_ssa_name (vec_dest
);
3277 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
3278 prev_res
, half_res
);
3283 if (ifn
!= IFN_LAST
)
3284 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3286 call
= gimple_build_call_vec (fndecl
, vargs
);
3287 new_temp
= make_ssa_name (vec_dest
, call
);
3288 gimple_call_set_lhs (call
, new_temp
);
3289 gimple_call_set_nothrow (call
, true);
3292 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3293 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3296 for (i
= 0; i
< nargs
; i
++)
3298 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3299 vec_oprndsi
.release ();
3304 for (i
= 0; i
< nargs
; i
++)
3306 op
= gimple_call_arg (stmt
, i
);
3309 = vect_get_vec_def_for_operand (op
, stmt
);
3312 vec_oprnd0
= gimple_call_arg (new_stmt
, i
);
3314 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3317 vargs
.quick_push (vec_oprnd0
);
3320 if (gimple_call_internal_p (stmt
)
3321 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
3323 tree cst
= build_index_vector (vectype_out
, j
* nunits_out
, 1);
3325 = vect_get_new_ssa_name (vectype_out
, vect_simple_var
, "cst_");
3326 gimple
*init_stmt
= gimple_build_assign (new_var
, cst
);
3327 vect_init_vector_1 (stmt
, init_stmt
, NULL
);
3328 new_temp
= make_ssa_name (vec_dest
);
3329 new_stmt
= gimple_build_assign (new_temp
, new_var
);
3331 else if (modifier
== NARROW
)
3333 tree half_res
= make_ssa_name (vectype_in
);
3334 gcall
*call
= gimple_build_call_internal_vec (ifn
, vargs
);
3335 gimple_call_set_lhs (call
, half_res
);
3336 gimple_call_set_nothrow (call
, true);
3338 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3341 prev_res
= half_res
;
3344 new_temp
= make_ssa_name (vec_dest
);
3345 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
3346 prev_res
, half_res
);
3351 if (ifn
!= IFN_LAST
)
3352 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3354 call
= gimple_build_call_vec (fndecl
, vargs
);
3355 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3356 gimple_call_set_lhs (call
, new_temp
);
3357 gimple_call_set_nothrow (call
, true);
3360 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3362 if (j
== (modifier
== NARROW
? 1 : 0))
3363 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
3365 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3367 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3370 else if (modifier
== NARROW
)
3372 for (j
= 0; j
< ncopies
; ++j
)
3374 /* Build argument list for the vectorized call. */
3376 vargs
.create (nargs
* 2);
3382 auto_vec
<vec
<tree
> > vec_defs (nargs
);
3383 vec
<tree
> vec_oprnds0
;
3385 for (i
= 0; i
< nargs
; i
++)
3386 vargs
.quick_push (gimple_call_arg (stmt
, i
));
3387 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3388 vec_oprnds0
= vec_defs
[0];
3390 /* Arguments are ready. Create the new vector stmt. */
3391 for (i
= 0; vec_oprnds0
.iterate (i
, &vec_oprnd0
); i
+= 2)
3395 for (k
= 0; k
< nargs
; k
++)
3397 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3398 vargs
.quick_push (vec_oprndsk
[i
]);
3399 vargs
.quick_push (vec_oprndsk
[i
+ 1]);
3402 if (ifn
!= IFN_LAST
)
3403 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3405 call
= gimple_build_call_vec (fndecl
, vargs
);
3406 new_temp
= make_ssa_name (vec_dest
, call
);
3407 gimple_call_set_lhs (call
, new_temp
);
3408 gimple_call_set_nothrow (call
, true);
3410 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3411 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3414 for (i
= 0; i
< nargs
; i
++)
3416 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3417 vec_oprndsi
.release ();
3422 for (i
= 0; i
< nargs
; i
++)
3424 op
= gimple_call_arg (stmt
, i
);
3428 = vect_get_vec_def_for_operand (op
, stmt
);
3430 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3434 vec_oprnd1
= gimple_call_arg (new_stmt
, 2*i
+ 1);
3436 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd1
);
3438 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
3441 vargs
.quick_push (vec_oprnd0
);
3442 vargs
.quick_push (vec_oprnd1
);
3445 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3446 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3447 gimple_call_set_lhs (new_stmt
, new_temp
);
3448 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3451 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
3453 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
3455 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
3458 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3461 /* No current target implements this case. */
3466 /* The call in STMT might prevent it from being removed in dce.
3467 We however cannot remove it here, due to the way the ssa name
3468 it defines is mapped to the new definition. So just replace
3469 rhs of the statement with something harmless. */
3474 type
= TREE_TYPE (scalar_dest
);
3475 if (is_pattern_stmt_p (stmt_info
))
3476 lhs
= gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info
));
3478 lhs
= gimple_call_lhs (stmt
);
3480 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (type
));
3481 set_vinfo_for_stmt (new_stmt
, stmt_info
);
3482 set_vinfo_for_stmt (stmt
, NULL
);
3483 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
3484 gsi_replace (gsi
, new_stmt
, false);
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  HOST_WIDE_INT linear_step;
  enum vect_def_type dt;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (gimple_call_internal_p (def_stmt, IFN_GOMP_SIMD_LANE)
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
/* Return the number of elements in vector type VECTYPE, which is associated
   with a SIMD clone.  At present these vectors always have a constant
   length.  */

static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
3587 vectorizable_simd_clone_call (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
3588 gimple
**vec_stmt
, slp_tree slp_node
)
3593 tree vec_oprnd0
= NULL_TREE
;
3594 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
), prev_stmt_info
;
3596 unsigned int nunits
;
3597 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3598 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3599 vec_info
*vinfo
= stmt_info
->vinfo
;
3600 struct loop
*loop
= loop_vinfo
? LOOP_VINFO_LOOP (loop_vinfo
) : NULL
;
3601 tree fndecl
, new_temp
;
3603 gimple
*new_stmt
= NULL
;
3605 auto_vec
<simd_call_arg_info
> arginfo
;
3606 vec
<tree
> vargs
= vNULL
;
3608 tree lhs
, rtype
, ratype
;
3609 vec
<constructor_elt
, va_gc
> *ret_ctor_elts
= NULL
;
3611 /* Is STMT a vectorizable call? */
3612 if (!is_gimple_call (stmt
))
3615 fndecl
= gimple_call_fndecl (stmt
);
3616 if (fndecl
== NULL_TREE
)
3619 struct cgraph_node
*node
= cgraph_node::get (fndecl
);
3620 if (node
== NULL
|| node
->simd_clones
== NULL
)
3623 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3626 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3630 if (gimple_call_lhs (stmt
)
3631 && TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3634 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3636 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3638 if (loop_vinfo
&& nested_in_vect_loop_p (loop
, stmt
))
3645 /* Process function arguments. */
3646 nargs
= gimple_call_num_args (stmt
);
3648 /* Bail out if the function has zero arguments. */
3652 arginfo
.reserve (nargs
, true);
3654 for (i
= 0; i
< nargs
; i
++)
3656 simd_call_arg_info thisarginfo
;
3659 thisarginfo
.linear_step
= 0;
3660 thisarginfo
.align
= 0;
3661 thisarginfo
.op
= NULL_TREE
;
3662 thisarginfo
.simd_lane_linear
= false;
3664 op
= gimple_call_arg (stmt
, i
);
3665 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &thisarginfo
.dt
,
3666 &thisarginfo
.vectype
)
3667 || thisarginfo
.dt
== vect_uninitialized_def
)
3669 if (dump_enabled_p ())
3670 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3671 "use not simple.\n");
3675 if (thisarginfo
.dt
== vect_constant_def
3676 || thisarginfo
.dt
== vect_external_def
)
3677 gcc_assert (thisarginfo
.vectype
== NULL_TREE
);
3679 gcc_assert (thisarginfo
.vectype
!= NULL_TREE
);
3681 /* For linear arguments, the analyze phase should have saved
3682 the base and step in STMT_VINFO_SIMD_CLONE_INFO. */
3683 if (i
* 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).length ()
3684 && STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2])
3686 gcc_assert (vec_stmt
);
3687 thisarginfo
.linear_step
3688 = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2]);
3690 = STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 1];
3691 thisarginfo
.simd_lane_linear
3692 = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 3]
3693 == boolean_true_node
);
3694 /* If loop has been peeled for alignment, we need to adjust it. */
3695 tree n1
= LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo
);
3696 tree n2
= LOOP_VINFO_NITERS (loop_vinfo
);
3697 if (n1
!= n2
&& !thisarginfo
.simd_lane_linear
)
3699 tree bias
= fold_build2 (MINUS_EXPR
, TREE_TYPE (n1
), n1
, n2
);
3700 tree step
= STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[i
* 3 + 2];
3701 tree opt
= TREE_TYPE (thisarginfo
.op
);
3702 bias
= fold_convert (TREE_TYPE (step
), bias
);
3703 bias
= fold_build2 (MULT_EXPR
, TREE_TYPE (step
), bias
, step
);
3705 = fold_build2 (POINTER_TYPE_P (opt
)
3706 ? POINTER_PLUS_EXPR
: PLUS_EXPR
, opt
,
3707 thisarginfo
.op
, bias
);
3711 && thisarginfo
.dt
!= vect_constant_def
3712 && thisarginfo
.dt
!= vect_external_def
3714 && TREE_CODE (op
) == SSA_NAME
3715 && simple_iv (loop
, loop_containing_stmt (stmt
), op
,
3717 && tree_fits_shwi_p (iv
.step
))
3719 thisarginfo
.linear_step
= tree_to_shwi (iv
.step
);
3720 thisarginfo
.op
= iv
.base
;
3722 else if ((thisarginfo
.dt
== vect_constant_def
3723 || thisarginfo
.dt
== vect_external_def
)
3724 && POINTER_TYPE_P (TREE_TYPE (op
)))
3725 thisarginfo
.align
= get_pointer_alignment (op
) / BITS_PER_UNIT
;
3726 /* Addresses of array elements indexed by GOMP_SIMD_LANE are
3728 if (POINTER_TYPE_P (TREE_TYPE (op
))
3729 && !thisarginfo
.linear_step
3731 && thisarginfo
.dt
!= vect_constant_def
3732 && thisarginfo
.dt
!= vect_external_def
3735 && TREE_CODE (op
) == SSA_NAME
)
3736 vect_simd_lane_linear (op
, loop
, &thisarginfo
);
3738 arginfo
.quick_push (thisarginfo
);
3741 unsigned HOST_WIDE_INT vf
;
3742 if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo
).is_constant (&vf
))
3744 if (dump_enabled_p ())
3745 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3746 "not considering SIMD clones; not yet supported"
3747 " for variable-width vectors.\n");
3751 unsigned int badness
= 0;
3752 struct cgraph_node
*bestn
= NULL
;
3753 if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).exists ())
3754 bestn
= cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info
)[0]);
3756 for (struct cgraph_node
*n
= node
->simd_clones
; n
!= NULL
;
3757 n
= n
->simdclone
->next_clone
)
3759 unsigned int this_badness
= 0;
3760 if (n
->simdclone
->simdlen
> vf
3761 || n
->simdclone
->nargs
!= nargs
)
3763 if (n
->simdclone
->simdlen
< vf
)
3764 this_badness
+= (exact_log2 (vf
)
3765 - exact_log2 (n
->simdclone
->simdlen
)) * 1024;
3766 if (n
->simdclone
->inbranch
)
3767 this_badness
+= 2048;
3768 int target_badness
= targetm
.simd_clone
.usable (n
);
3769 if (target_badness
< 0)
3771 this_badness
+= target_badness
* 512;
3772 /* FORNOW: Have to add code to add the mask argument. */
3773 if (n
->simdclone
->inbranch
)
3775 for (i
= 0; i
< nargs
; i
++)
3777 switch (n
->simdclone
->args
[i
].arg_type
)
3779 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3780 if (!useless_type_conversion_p
3781 (n
->simdclone
->args
[i
].orig_type
,
3782 TREE_TYPE (gimple_call_arg (stmt
, i
))))
3784 else if (arginfo
[i
].dt
== vect_constant_def
3785 || arginfo
[i
].dt
== vect_external_def
3786 || arginfo
[i
].linear_step
)
3789 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
3790 if (arginfo
[i
].dt
!= vect_constant_def
3791 && arginfo
[i
].dt
!= vect_external_def
)
3794 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
3795 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
3796 if (arginfo
[i
].dt
== vect_constant_def
3797 || arginfo
[i
].dt
== vect_external_def
3798 || (arginfo
[i
].linear_step
3799 != n
->simdclone
->args
[i
].linear_step
))
3802 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
3803 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
3804 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
3805 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
3806 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
3807 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
3811 case SIMD_CLONE_ARG_TYPE_MASK
:
3814 if (i
== (size_t) -1)
3816 if (n
->simdclone
->args
[i
].alignment
> arginfo
[i
].align
)
3821 if (arginfo
[i
].align
)
3822 this_badness
+= (exact_log2 (arginfo
[i
].align
)
3823 - exact_log2 (n
->simdclone
->args
[i
].alignment
));
3825 if (i
== (size_t) -1)
3827 if (bestn
== NULL
|| this_badness
< badness
)
3830 badness
= this_badness
;
3837 for (i
= 0; i
< nargs
; i
++)
3838 if ((arginfo
[i
].dt
== vect_constant_def
3839 || arginfo
[i
].dt
== vect_external_def
)
3840 && bestn
->simdclone
->args
[i
].arg_type
== SIMD_CLONE_ARG_TYPE_VECTOR
)
3843 = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt
,
3845 if (arginfo
[i
].vectype
== NULL
3846 || (simd_clone_subparts (arginfo
[i
].vectype
)
3847 > bestn
->simdclone
->simdlen
))
  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = vf / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    return false;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
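  /* Illustrative sketch (not part of the vectorizer): source code that
     satisfies the safelen check above.  The function names and the
     safelen value below are made-up examples.  */
#if 0
#pragma omp declare simd
extern int do_work (int *p, int i);	/* not 'const': it has a VUSE */

void
consume (int *p, int *out, int n)
{
  /* safelen (8) asserts that 8 consecutive iterations may be executed
     concurrently, so a clone with simdlen <= 8 is acceptable even though
     do_work may touch memory.  Without the clause the call would be
     rejected by the check above.  */
#pragma omp simd safelen(8)
  for (int i = 0; i < n; i++)
    out[i] = do_work (p, i);
}
#endif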
3866 if (!vec_stmt
) /* transformation not required. */
3868 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (bestn
->decl
);
3869 for (i
= 0; i
< nargs
; i
++)
3870 if ((bestn
->simdclone
->args
[i
].arg_type
3871 == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
)
3872 || (bestn
->simdclone
->args
[i
].arg_type
3873 == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
))
3875 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_grow_cleared (i
* 3
3877 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (arginfo
[i
].op
);
3878 tree lst
= POINTER_TYPE_P (TREE_TYPE (arginfo
[i
].op
))
3879 ? size_type_node
: TREE_TYPE (arginfo
[i
].op
);
3880 tree ls
= build_int_cst (lst
, arginfo
[i
].linear_step
);
3881 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (ls
);
3882 tree sll
= arginfo
[i
].simd_lane_linear
3883 ? boolean_true_node
: boolean_false_node
;
3884 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).safe_push (sll
);
3886 STMT_VINFO_TYPE (stmt_info
) = call_simd_clone_vec_info_type
;
3887 if (dump_enabled_p ())
3888 dump_printf_loc (MSG_NOTE
, vect_location
,
3889 "=== vectorizable_simd_clone_call ===\n");
3890 /* vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
3896 if (dump_enabled_p ())
3897 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
3900 scalar_dest
= gimple_call_lhs (stmt
);
3901 vec_dest
= NULL_TREE
;
3906 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
3907 rtype
= TREE_TYPE (TREE_TYPE (fndecl
));
3908 if (TREE_CODE (rtype
) == ARRAY_TYPE
)
3911 rtype
= TREE_TYPE (ratype
);
3915 prev_stmt_info
= NULL
;
3916 for (j
= 0; j
< ncopies
; ++j
)
3918 /* Build argument list for the vectorized call. */
3920 vargs
.create (nargs
);
3924 for (i
= 0; i
< nargs
; i
++)
3926 unsigned int k
, l
, m
, o
;
3928 op
= gimple_call_arg (stmt
, i
);
3929 switch (bestn
->simdclone
->args
[i
].arg_type
)
3931 case SIMD_CLONE_ARG_TYPE_VECTOR
:
3932 atype
= bestn
->simdclone
->args
[i
].vector_type
;
3933 o
= nunits
/ simd_clone_subparts (atype
);
3934 for (m
= j
* o
; m
< (j
+ 1) * o
; m
++)
3936 if (simd_clone_subparts (atype
)
3937 < simd_clone_subparts (arginfo
[i
].vectype
))
3939 poly_uint64 prec
= GET_MODE_BITSIZE (TYPE_MODE (atype
));
3940 k
= (simd_clone_subparts (arginfo
[i
].vectype
)
3941 / simd_clone_subparts (atype
));
3942 gcc_assert ((k
& (k
- 1)) == 0);
3945 = vect_get_vec_def_for_operand (op
, stmt
);
3948 vec_oprnd0
= arginfo
[i
].op
;
3949 if ((m
& (k
- 1)) == 0)
3951 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3954 arginfo
[i
].op
= vec_oprnd0
;
3956 = build3 (BIT_FIELD_REF
, atype
, vec_oprnd0
,
3958 bitsize_int ((m
& (k
- 1)) * prec
));
3960 = gimple_build_assign (make_ssa_name (atype
),
3962 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3963 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
3967 k
= (simd_clone_subparts (atype
)
3968 / simd_clone_subparts (arginfo
[i
].vectype
));
3969 gcc_assert ((k
& (k
- 1)) == 0);
3970 vec
<constructor_elt
, va_gc
> *ctor_elts
;
3972 vec_alloc (ctor_elts
, k
);
3975 for (l
= 0; l
< k
; l
++)
3977 if (m
== 0 && l
== 0)
3979 = vect_get_vec_def_for_operand (op
, stmt
);
3982 = vect_get_vec_def_for_stmt_copy (arginfo
[i
].dt
,
3984 arginfo
[i
].op
= vec_oprnd0
;
3987 CONSTRUCTOR_APPEND_ELT (ctor_elts
, NULL_TREE
,
3991 vargs
.safe_push (vec_oprnd0
);
3994 vec_oprnd0
= build_constructor (atype
, ctor_elts
);
3996 = gimple_build_assign (make_ssa_name (atype
),
3998 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3999 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
4004 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
4005 vargs
.safe_push (op
);
4007 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
4008 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
4013 = force_gimple_operand (arginfo
[i
].op
, &stmts
, true,
4018 edge pe
= loop_preheader_edge (loop
);
4019 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
4020 gcc_assert (!new_bb
);
4022 if (arginfo
[i
].simd_lane_linear
)
4024 vargs
.safe_push (arginfo
[i
].op
);
4027 tree phi_res
= copy_ssa_name (op
);
4028 gphi
*new_phi
= create_phi_node (phi_res
, loop
->header
);
4029 set_vinfo_for_stmt (new_phi
,
4030 new_stmt_vec_info (new_phi
, loop_vinfo
));
4031 add_phi_arg (new_phi
, arginfo
[i
].op
,
4032 loop_preheader_edge (loop
), UNKNOWN_LOCATION
);
4034 = POINTER_TYPE_P (TREE_TYPE (op
))
4035 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
4036 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
4037 ? sizetype
: TREE_TYPE (op
);
4039 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
4041 tree tcst
= wide_int_to_tree (type
, cst
);
4042 tree phi_arg
= copy_ssa_name (op
);
4044 = gimple_build_assign (phi_arg
, code
, phi_res
, tcst
);
4045 gimple_stmt_iterator si
= gsi_after_labels (loop
->header
);
4046 gsi_insert_after (&si
, new_stmt
, GSI_NEW_STMT
);
4047 set_vinfo_for_stmt (new_stmt
,
4048 new_stmt_vec_info (new_stmt
, loop_vinfo
));
4049 add_phi_arg (new_phi
, phi_arg
, loop_latch_edge (loop
),
4051 arginfo
[i
].op
= phi_res
;
4052 vargs
.safe_push (phi_res
);
4057 = POINTER_TYPE_P (TREE_TYPE (op
))
4058 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
4059 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
4060 ? sizetype
: TREE_TYPE (op
);
4062 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
4064 tree tcst
= wide_int_to_tree (type
, cst
);
4065 new_temp
= make_ssa_name (TREE_TYPE (op
));
4066 new_stmt
= gimple_build_assign (new_temp
, code
,
4067 arginfo
[i
].op
, tcst
);
4068 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4069 vargs
.safe_push (new_temp
);
4072 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
4073 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
4074 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
4075 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
4076 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
4077 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
4083 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
4086 gcc_assert (ratype
|| simd_clone_subparts (rtype
) == nunits
);
4088 new_temp
= create_tmp_var (ratype
);
4089 else if (simd_clone_subparts (vectype
)
4090 == simd_clone_subparts (rtype
))
4091 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4093 new_temp
= make_ssa_name (rtype
, new_stmt
);
4094 gimple_call_set_lhs (new_stmt
, new_temp
);
4096 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4100 if (simd_clone_subparts (vectype
) < nunits
)
4103 poly_uint64 prec
= GET_MODE_BITSIZE (TYPE_MODE (vectype
));
4104 poly_uint64 bytes
= GET_MODE_SIZE (TYPE_MODE (vectype
));
4105 k
= nunits
/ simd_clone_subparts (vectype
);
4106 gcc_assert ((k
& (k
- 1)) == 0);
4107 for (l
= 0; l
< k
; l
++)
4112 t
= build_fold_addr_expr (new_temp
);
4113 t
= build2 (MEM_REF
, vectype
, t
,
4114 build_int_cst (TREE_TYPE (t
), l
* bytes
));
4117 t
= build3 (BIT_FIELD_REF
, vectype
, new_temp
,
4118 bitsize_int (prec
), bitsize_int (l
* prec
));
4120 = gimple_build_assign (make_ssa_name (vectype
), t
);
4121 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4122 if (j
== 0 && l
== 0)
4123 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4125 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4127 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4132 tree clobber
= build_constructor (ratype
, NULL
);
4133 TREE_THIS_VOLATILE (clobber
) = 1;
4134 new_stmt
= gimple_build_assign (new_temp
, clobber
);
4135 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4139 else if (simd_clone_subparts (vectype
) > nunits
)
4141 unsigned int k
= (simd_clone_subparts (vectype
)
4142 / simd_clone_subparts (rtype
));
4143 gcc_assert ((k
& (k
- 1)) == 0);
4144 if ((j
& (k
- 1)) == 0)
4145 vec_alloc (ret_ctor_elts
, k
);
4148 unsigned int m
, o
= nunits
/ simd_clone_subparts (rtype
);
4149 for (m
= 0; m
< o
; m
++)
4151 tree tem
= build4 (ARRAY_REF
, rtype
, new_temp
,
4152 size_int (m
), NULL_TREE
, NULL_TREE
);
4154 = gimple_build_assign (make_ssa_name (rtype
), tem
);
4155 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4156 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
,
4157 gimple_assign_lhs (new_stmt
));
4159 tree clobber
= build_constructor (ratype
, NULL
);
4160 TREE_THIS_VOLATILE (clobber
) = 1;
4161 new_stmt
= gimple_build_assign (new_temp
, clobber
);
4162 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4165 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
, new_temp
);
4166 if ((j
& (k
- 1)) != k
- 1)
4168 vec_oprnd0
= build_constructor (vectype
, ret_ctor_elts
);
4170 = gimple_build_assign (make_ssa_name (vec_dest
), vec_oprnd0
);
4171 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4173 if ((unsigned) j
== k
- 1)
4174 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4176 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4178 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4183 tree t
= build_fold_addr_expr (new_temp
);
4184 t
= build2 (MEM_REF
, vectype
, t
,
4185 build_int_cst (TREE_TYPE (t
), 0));
4187 = gimple_build_assign (make_ssa_name (vec_dest
), t
);
4188 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4189 tree clobber
= build_constructor (ratype
, NULL
);
4190 TREE_THIS_VOLATILE (clobber
) = 1;
4191 vect_finish_stmt_generation (stmt
,
4192 gimple_build_assign (new_temp
,
4198 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4200 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4202 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
4246 vect_gen_widened_results_half (enum tree_code code
,
4248 tree vec_oprnd0
, tree vec_oprnd1
, int op_type
,
4249 tree vec_dest
, gimple_stmt_iterator
*gsi
,
4255 /* Generate half of the widened result: */
4256 if (code
== CALL_EXPR
)
4258 /* Target specific support */
4259 if (op_type
== binary_op
)
4260 new_stmt
= gimple_build_call (decl
, 2, vec_oprnd0
, vec_oprnd1
);
4262 new_stmt
= gimple_build_call (decl
, 1, vec_oprnd0
);
4263 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4264 gimple_call_set_lhs (new_stmt
, new_temp
);
4268 /* Generic support */
4269 gcc_assert (op_type
== TREE_CODE_LENGTH (code
));
4270 if (op_type
!= binary_op
)
4272 new_stmt
= gimple_build_assign (vec_dest
, code
, vec_oprnd0
, vec_oprnd1
);
4273 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4274 gimple_assign_set_lhs (new_stmt
, new_temp
);
4276 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
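/* Illustrative scalar model (not part of the vectorizer) of the two
   "halves" this helper produces when a narrow vector is widened: the low
   half covers the first N/2 elements, the high half the rest.  The types
   and element counts below are example values only.  */
#if 0
static void
widen_halves (const short src[8], int lo[4], int hi[4])
{
  for (int i = 0; i < 4; i++)
    {
      lo[i] = (int) src[i];	/* first widened result half  */
      hi[i] = (int) src[i + 4];	/* second widened result half */
    }
}
#endif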
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand () (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy () using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy () for details.
   The vectors are collected into VEC_OPRNDS.  */
4290 vect_get_loop_based_defs (tree
*oprnd
, gimple
*stmt
, enum vect_def_type dt
,
4291 vec
<tree
> *vec_oprnds
, int multi_step_cvt
)
4295 /* Get first vector operand. */
4296 /* All the vector operands except the very first one (that is scalar oprnd)
4298 if (TREE_CODE (TREE_TYPE (*oprnd
)) != VECTOR_TYPE
)
4299 vec_oprnd
= vect_get_vec_def_for_operand (*oprnd
, stmt
);
4301 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, *oprnd
);
4303 vec_oprnds
->quick_push (vec_oprnd
);
4305 /* Get second vector operand. */
4306 vec_oprnd
= vect_get_vec_def_for_stmt_copy (dt
, vec_oprnd
);
4307 vec_oprnds
->quick_push (vec_oprnd
);
4311 /* For conversion in multiple steps, continue to get operands
4314 vect_get_loop_based_defs (oprnd
, stmt
, dt
, vec_oprnds
, multi_step_cvt
- 1);
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */
4323 vect_create_vectorized_demotion_stmts (vec
<tree
> *vec_oprnds
,
4324 int multi_step_cvt
, gimple
*stmt
,
4326 gimple_stmt_iterator
*gsi
,
4327 slp_tree slp_node
, enum tree_code code
,
4328 stmt_vec_info
*prev_stmt_info
)
4331 tree vop0
, vop1
, new_tmp
, vec_dest
;
4333 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4335 vec_dest
= vec_dsts
.pop ();
4337 for (i
= 0; i
< vec_oprnds
->length (); i
+= 2)
4339 /* Create demotion operation. */
4340 vop0
= (*vec_oprnds
)[i
];
4341 vop1
= (*vec_oprnds
)[i
+ 1];
4342 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
4343 new_tmp
= make_ssa_name (vec_dest
, new_stmt
);
4344 gimple_assign_set_lhs (new_stmt
, new_tmp
);
4345 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4348 /* Store the resulting vector for next recursive call. */
4349 (*vec_oprnds
)[i
/2] = new_tmp
;
4352 /* This is the last step of the conversion sequence. Store the
4353 vectors in SLP_NODE or in vector info of the scalar statement
4354 (or in STMT_VINFO_RELATED_STMT chain). */
4356 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4359 if (!*prev_stmt_info
)
4360 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
4362 STMT_VINFO_RELATED_STMT (*prev_stmt_info
) = new_stmt
;
4364 *prev_stmt_info
= vinfo_for_stmt (new_stmt
);
  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */

  /* At each level of recursion we have half of the operands we had at the
     previous level.  */
  vec_oprnds->truncate ((i+1)/2);
4378 vect_create_vectorized_demotion_stmts (vec_oprnds
, multi_step_cvt
- 1,
4379 stmt
, vec_dsts
, gsi
, slp_node
,
4380 VEC_PACK_TRUNC_EXPR
,
4384 vec_dsts
.quick_push (vec_dest
);
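/* Illustrative scalar model (not part of the vectorizer) of multi-step
   demotion: each level packs two vectors into one, so the operand list is
   halved per recursion level, e.g. int -> short -> char.  The types are
   example values only.  */
#if 0
static void
demote_int_to_char (const int *src, signed char *dst, int n)
{
  for (int i = 0; i < n; i++)
    {
      short mid = (short) src[i];	/* level 1: int   -> short */
      dst[i] = (signed char) mid;	/* level 2: short -> char  */
    }
}
#endif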
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
4393 vect_create_vectorized_promotion_stmts (vec
<tree
> *vec_oprnds0
,
4394 vec
<tree
> *vec_oprnds1
,
4395 gimple
*stmt
, tree vec_dest
,
4396 gimple_stmt_iterator
*gsi
,
4397 enum tree_code code1
,
4398 enum tree_code code2
, tree decl1
,
4399 tree decl2
, int op_type
)
4402 tree vop0
, vop1
, new_tmp1
, new_tmp2
;
4403 gimple
*new_stmt1
, *new_stmt2
;
4404 vec
<tree
> vec_tmp
= vNULL
;
4406 vec_tmp
.create (vec_oprnds0
->length () * 2);
4407 FOR_EACH_VEC_ELT (*vec_oprnds0
, i
, vop0
)
4409 if (op_type
== binary_op
)
4410 vop1
= (*vec_oprnds1
)[i
];
4414 /* Generate the two halves of promotion operation. */
4415 new_stmt1
= vect_gen_widened_results_half (code1
, decl1
, vop0
, vop1
,
4416 op_type
, vec_dest
, gsi
, stmt
);
4417 new_stmt2
= vect_gen_widened_results_half (code2
, decl2
, vop0
, vop1
,
4418 op_type
, vec_dest
, gsi
, stmt
);
4419 if (is_gimple_call (new_stmt1
))
4421 new_tmp1
= gimple_call_lhs (new_stmt1
);
4422 new_tmp2
= gimple_call_lhs (new_stmt2
);
4426 new_tmp1
= gimple_assign_lhs (new_stmt1
);
4427 new_tmp2
= gimple_assign_lhs (new_stmt2
);
4430 /* Store the results for the next step. */
4431 vec_tmp
.quick_push (new_tmp1
);
4432 vec_tmp
.quick_push (new_tmp2
);
4435 vec_oprnds0
->release ();
4436 *vec_oprnds0
= vec_tmp
;
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
4446 vectorizable_conversion (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
4447 gimple
**vec_stmt
, slp_tree slp_node
)
4451 tree op0
, op1
= NULL_TREE
;
4452 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
4453 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
4454 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4455 enum tree_code code
, code1
= ERROR_MARK
, code2
= ERROR_MARK
;
4456 enum tree_code codecvt1
= ERROR_MARK
, codecvt2
= ERROR_MARK
;
4457 tree decl1
= NULL_TREE
, decl2
= NULL_TREE
;
4460 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4462 gimple
*new_stmt
= NULL
;
4463 stmt_vec_info prev_stmt_info
;
4464 poly_uint64 nunits_in
;
4465 poly_uint64 nunits_out
;
4466 tree vectype_out
, vectype_in
;
4468 tree lhs_type
, rhs_type
;
4469 enum { NARROW
, NONE
, WIDEN
} modifier
;
4470 vec
<tree
> vec_oprnds0
= vNULL
;
4471 vec
<tree
> vec_oprnds1
= vNULL
;
4473 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4474 vec_info
*vinfo
= stmt_info
->vinfo
;
4475 int multi_step_cvt
= 0;
4476 vec
<tree
> interm_types
= vNULL
;
4477 tree last_oprnd
, intermediate_type
, cvt_type
= NULL_TREE
;
4479 unsigned short fltsz
;
4481 /* Is STMT a vectorizable conversion? */
4483 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4486 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4490 if (!is_gimple_assign (stmt
))
4493 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4496 code
= gimple_assign_rhs_code (stmt
);
4497 if (!CONVERT_EXPR_CODE_P (code
)
4498 && code
!= FIX_TRUNC_EXPR
4499 && code
!= FLOAT_EXPR
4500 && code
!= WIDEN_MULT_EXPR
4501 && code
!= WIDEN_LSHIFT_EXPR
)
4504 op_type
= TREE_CODE_LENGTH (code
);
4506 /* Check types of lhs and rhs. */
4507 scalar_dest
= gimple_assign_lhs (stmt
);
4508 lhs_type
= TREE_TYPE (scalar_dest
);
4509 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4511 op0
= gimple_assign_rhs1 (stmt
);
4512 rhs_type
= TREE_TYPE (op0
);
4514 if ((code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4515 && !((INTEGRAL_TYPE_P (lhs_type
)
4516 && INTEGRAL_TYPE_P (rhs_type
))
4517 || (SCALAR_FLOAT_TYPE_P (lhs_type
)
4518 && SCALAR_FLOAT_TYPE_P (rhs_type
))))
4521 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4522 && ((INTEGRAL_TYPE_P (lhs_type
)
4523 && !type_has_mode_precision_p (lhs_type
))
4524 || (INTEGRAL_TYPE_P (rhs_type
)
4525 && !type_has_mode_precision_p (rhs_type
))))
4527 if (dump_enabled_p ())
4528 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4529 "type conversion to/from bit-precision unsupported."
4534 /* Check the operands of the operation. */
4535 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
4537 if (dump_enabled_p ())
4538 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4539 "use not simple.\n");
4542 if (op_type
== binary_op
)
4546 op1
= gimple_assign_rhs2 (stmt
);
4547 gcc_assert (code
== WIDEN_MULT_EXPR
|| code
== WIDEN_LSHIFT_EXPR
);
4548 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4550 if (CONSTANT_CLASS_P (op0
))
4551 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &vectype_in
);
4553 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]);
4557 if (dump_enabled_p ())
4558 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4559 "use not simple.\n");
4564 /* If op0 is an external or constant defs use a vector type of
4565 the same size as the output vector type. */
4567 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
4569 gcc_assert (vectype_in
);
4572 if (dump_enabled_p ())
4574 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4575 "no vectype for scalar type ");
4576 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4577 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4583 if (VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4584 && !VECTOR_BOOLEAN_TYPE_P (vectype_in
))
4586 if (dump_enabled_p ())
4588 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4589 "can't convert between boolean and non "
4591 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4592 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4598 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
4599 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4600 if (known_eq (nunits_out
, nunits_in
))
4602 else if (multiple_p (nunits_out
, nunits_in
))
4606 gcc_checking_assert (multiple_p (nunits_in
, nunits_out
));
4610 /* Multiple types in SLP are handled by creating the appropriate number of
4611 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
4615 else if (modifier
== NARROW
)
4616 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_out
);
4618 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
4620 /* Sanity check: make sure that at least one copy of the vectorized stmt
4621 needs to be generated. */
4622 gcc_assert (ncopies
>= 1);
4624 bool found_mode
= false;
4625 scalar_mode lhs_mode
= SCALAR_TYPE_MODE (lhs_type
);
4626 scalar_mode rhs_mode
= SCALAR_TYPE_MODE (rhs_type
);
4627 opt_scalar_mode rhs_mode_iter
;
4629 /* Supportable by target? */
4633 if (code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4635 if (supportable_convert_operation (code
, vectype_out
, vectype_in
,
4640 if (dump_enabled_p ())
4641 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4642 "conversion not supported by target.\n");
4646 if (supportable_widening_operation (code
, stmt
, vectype_out
, vectype_in
,
4647 &code1
, &code2
, &multi_step_cvt
,
4650 /* Binary widening operation can only be supported directly by the
4652 gcc_assert (!(multi_step_cvt
&& op_type
== binary_op
));
4656 if (code
!= FLOAT_EXPR
4657 || GET_MODE_SIZE (lhs_mode
) <= GET_MODE_SIZE (rhs_mode
))
4660 fltsz
= GET_MODE_SIZE (lhs_mode
);
4661 FOR_EACH_2XWIDER_MODE (rhs_mode_iter
, rhs_mode
)
4663 rhs_mode
= rhs_mode_iter
.require ();
4664 if (GET_MODE_SIZE (rhs_mode
) > fltsz
)
4668 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4669 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4670 if (cvt_type
== NULL_TREE
)
4673 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4675 if (!supportable_convert_operation (code
, vectype_out
,
4676 cvt_type
, &decl1
, &codecvt1
))
4679 else if (!supportable_widening_operation (code
, stmt
, vectype_out
,
4680 cvt_type
, &codecvt1
,
4681 &codecvt2
, &multi_step_cvt
,
4685 gcc_assert (multi_step_cvt
== 0);
4687 if (supportable_widening_operation (NOP_EXPR
, stmt
, cvt_type
,
4688 vectype_in
, &code1
, &code2
,
4689 &multi_step_cvt
, &interm_types
))
4699 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4700 codecvt2
= ERROR_MARK
;
4704 interm_types
.safe_push (cvt_type
);
4705 cvt_type
= NULL_TREE
;
4710 gcc_assert (op_type
== unary_op
);
4711 if (supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
4712 &code1
, &multi_step_cvt
,
4716 if (code
!= FIX_TRUNC_EXPR
4717 || GET_MODE_SIZE (lhs_mode
) >= GET_MODE_SIZE (rhs_mode
))
4721 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4722 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4723 if (cvt_type
== NULL_TREE
)
4725 if (!supportable_convert_operation (code
, cvt_type
, vectype_in
,
4728 if (supportable_narrowing_operation (NOP_EXPR
, vectype_out
, cvt_type
,
4729 &code1
, &multi_step_cvt
,
4738 if (!vec_stmt
) /* transformation not required. */
4740 if (dump_enabled_p ())
4741 dump_printf_loc (MSG_NOTE
, vect_location
,
4742 "=== vectorizable_conversion ===\n");
4743 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
4745 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
4747 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
4749 else if (modifier
== NARROW
)
4751 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
4753 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
4757 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
4759 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
4761 interm_types
.release ();
4766 if (dump_enabled_p ())
4767 dump_printf_loc (MSG_NOTE
, vect_location
,
4768 "transform conversion. ncopies = %d.\n", ncopies
);
4770 if (op_type
== binary_op
)
4772 if (CONSTANT_CLASS_P (op0
))
4773 op0
= fold_convert (TREE_TYPE (op1
), op0
);
4774 else if (CONSTANT_CLASS_P (op1
))
4775 op1
= fold_convert (TREE_TYPE (op0
), op1
);
4778 /* In case of multi-step conversion, we first generate conversion operations
4779 to the intermediate types, and then from that types to the final one.
4780 We create vector destinations for the intermediate type (TYPES) received
4781 from supportable_*_operation, and store them in the correct order
4782 for future use in vect_create_vectorized_*_stmts (). */
4783 auto_vec
<tree
> vec_dsts (multi_step_cvt
+ 1);
4784 vec_dest
= vect_create_destination_var (scalar_dest
,
4785 (cvt_type
&& modifier
== WIDEN
)
4786 ? cvt_type
: vectype_out
);
4787 vec_dsts
.quick_push (vec_dest
);
4791 for (i
= interm_types
.length () - 1;
4792 interm_types
.iterate (i
, &intermediate_type
); i
--)
4794 vec_dest
= vect_create_destination_var (scalar_dest
,
4796 vec_dsts
.quick_push (vec_dest
);
4801 vec_dest
= vect_create_destination_var (scalar_dest
,
4803 ? vectype_out
: cvt_type
);
4807 if (modifier
== WIDEN
)
4809 vec_oprnds0
.create (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1);
4810 if (op_type
== binary_op
)
4811 vec_oprnds1
.create (1);
4813 else if (modifier
== NARROW
)
4814 vec_oprnds0
.create (
4815 2 * (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
4817 else if (code
== WIDEN_LSHIFT_EXPR
)
4818 vec_oprnds1
.create (slp_node
->vec_stmts_size
);
4821 prev_stmt_info
= NULL
;
4825 for (j
= 0; j
< ncopies
; j
++)
4828 vect_get_vec_defs (op0
, NULL
, stmt
, &vec_oprnds0
, NULL
, slp_node
);
4830 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, NULL
);
4832 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4834 /* Arguments are ready, create the new vector stmt. */
4835 if (code1
== CALL_EXPR
)
4837 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4838 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4839 gimple_call_set_lhs (new_stmt
, new_temp
);
4843 gcc_assert (TREE_CODE_LENGTH (code1
) == unary_op
);
4844 new_stmt
= gimple_build_assign (vec_dest
, code1
, vop0
);
4845 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4846 gimple_assign_set_lhs (new_stmt
, new_temp
);
4849 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4851 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4854 if (!prev_stmt_info
)
4855 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4857 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4858 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4865 /* In case the vectorization factor (VF) is bigger than the number
4866 of elements that we can fit in a vectype (nunits), we have to
4867 generate more than one vector stmt - i.e - we need to "unroll"
4868 the vector stmt by a factor VF/nunits. */
4869 for (j
= 0; j
< ncopies
; j
++)
4876 if (code
== WIDEN_LSHIFT_EXPR
)
4881 /* Store vec_oprnd1 for every vector stmt to be created
4882 for SLP_NODE. We check during the analysis that all
4883 the shift arguments are the same. */
4884 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
4885 vec_oprnds1
.quick_push (vec_oprnd1
);
4887 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4891 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
,
4892 &vec_oprnds1
, slp_node
);
4896 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
);
4897 vec_oprnds0
.quick_push (vec_oprnd0
);
4898 if (op_type
== binary_op
)
4900 if (code
== WIDEN_LSHIFT_EXPR
)
4903 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
);
4904 vec_oprnds1
.quick_push (vec_oprnd1
);
4910 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
4911 vec_oprnds0
.truncate (0);
4912 vec_oprnds0
.quick_push (vec_oprnd0
);
4913 if (op_type
== binary_op
)
4915 if (code
== WIDEN_LSHIFT_EXPR
)
4918 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1],
4920 vec_oprnds1
.truncate (0);
4921 vec_oprnds1
.quick_push (vec_oprnd1
);
4925 /* Arguments are ready. Create the new vector stmts. */
4926 for (i
= multi_step_cvt
; i
>= 0; i
--)
4928 tree this_dest
= vec_dsts
[i
];
4929 enum tree_code c1
= code1
, c2
= code2
;
4930 if (i
== 0 && codecvt2
!= ERROR_MARK
)
4935 vect_create_vectorized_promotion_stmts (&vec_oprnds0
,
4937 stmt
, this_dest
, gsi
,
4938 c1
, c2
, decl1
, decl2
,
4942 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4946 if (codecvt1
== CALL_EXPR
)
4948 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4949 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4950 gimple_call_set_lhs (new_stmt
, new_temp
);
4954 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4955 new_temp
= make_ssa_name (vec_dest
);
4956 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4960 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4963 new_stmt
= SSA_NAME_DEF_STMT (vop0
);
4966 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4969 if (!prev_stmt_info
)
4970 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
4972 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4973 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4978 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
4982 /* In case the vectorization factor (VF) is bigger than the number
4983 of elements that we can fit in a vectype (nunits), we have to
4984 generate more than one vector stmt - i.e - we need to "unroll"
4985 the vector stmt by a factor VF/nunits. */
4986 for (j
= 0; j
< ncopies
; j
++)
4990 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4994 vec_oprnds0
.truncate (0);
4995 vect_get_loop_based_defs (&last_oprnd
, stmt
, dt
[0], &vec_oprnds0
,
4996 vect_pow2 (multi_step_cvt
) - 1);
4999 /* Arguments are ready. Create the new vector stmts. */
5001 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5003 if (codecvt1
== CALL_EXPR
)
5005 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
5006 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5007 gimple_call_set_lhs (new_stmt
, new_temp
);
5011 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
5012 new_temp
= make_ssa_name (vec_dest
);
5013 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
5017 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5018 vec_oprnds0
[i
] = new_temp
;
5021 vect_create_vectorized_demotion_stmts (&vec_oprnds0
, multi_step_cvt
,
5022 stmt
, vec_dsts
, gsi
,
5027 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
5031 vec_oprnds0
.release ();
5032 vec_oprnds1
.release ();
5033 interm_types
.release ();
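/* Illustrative source-level examples (not part of the vectorizer) of the
   three modifiers classified above from the element counts; treating the
   int/float case as modifier NONE assumes equal-width vector elements and
   is only an example.  */
#if 0
void
conversion_examples (const int *i, long long *l, float *f, short *s, int n)
{
  for (int k = 0; k < n; k++)
    f[k] = (float) i[k];	/* NONE:   nunits_out == nunits_in          */
  for (int k = 0; k < n; k++)
    l[k] = (long long) i[k];	/* WIDEN:  fewer elements per output vector */
  for (int k = 0; k < n; k++)
    s[k] = (short) i[k];	/* NARROW: two inputs pack into one output  */
}
#endif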
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
5047 vectorizable_assignment (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
5048 gimple
**vec_stmt
, slp_tree slp_node
)
5053 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5054 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5057 enum vect_def_type dt
[1] = {vect_unknown_def_type
};
5061 vec
<tree
> vec_oprnds
= vNULL
;
5063 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5064 vec_info
*vinfo
= stmt_info
->vinfo
;
5065 gimple
*new_stmt
= NULL
;
5066 stmt_vec_info prev_stmt_info
= NULL
;
5067 enum tree_code code
;
5070 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5073 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5077 /* Is vectorizable assignment? */
5078 if (!is_gimple_assign (stmt
))
5081 scalar_dest
= gimple_assign_lhs (stmt
);
5082 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
5085 code
= gimple_assign_rhs_code (stmt
);
5086 if (gimple_assign_single_p (stmt
)
5087 || code
== PAREN_EXPR
5088 || CONVERT_EXPR_CODE_P (code
))
5089 op
= gimple_assign_rhs1 (stmt
);
5093 if (code
== VIEW_CONVERT_EXPR
)
5094 op
= TREE_OPERAND (op
, 0);
5096 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5097 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
5099 /* Multiple types in SLP are handled by creating the appropriate number of
5100 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5105 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5107 gcc_assert (ncopies
>= 1);
5109 if (!vect_is_simple_use (op
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
5111 if (dump_enabled_p ())
5112 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5113 "use not simple.\n");
5117 /* We can handle NOP_EXPR conversions that do not change the number
5118 of elements or the vector size. */
5119 if ((CONVERT_EXPR_CODE_P (code
)
5120 || code
== VIEW_CONVERT_EXPR
)
5122 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in
), nunits
)
5123 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype
)),
5124 GET_MODE_SIZE (TYPE_MODE (vectype_in
)))))
5127 /* We do not handle bit-precision changes. */
5128 if ((CONVERT_EXPR_CODE_P (code
)
5129 || code
== VIEW_CONVERT_EXPR
)
5130 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
5131 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
5132 || !type_has_mode_precision_p (TREE_TYPE (op
)))
5133 /* But a conversion that does not change the bit-pattern is ok. */
5134 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
5135 > TYPE_PRECISION (TREE_TYPE (op
)))
5136 && TYPE_UNSIGNED (TREE_TYPE (op
)))
5137 /* Conversion between boolean types of different sizes is
5138 a simple assignment in case their vectypes are same
5140 && (!VECTOR_BOOLEAN_TYPE_P (vectype
)
5141 || !VECTOR_BOOLEAN_TYPE_P (vectype_in
)))
5143 if (dump_enabled_p ())
5144 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5145 "type conversion to/from bit-precision "
5150 if (!vec_stmt
) /* transformation not required. */
5152 STMT_VINFO_TYPE (stmt_info
) = assignment_vec_info_type
;
5153 if (dump_enabled_p ())
5154 dump_printf_loc (MSG_NOTE
, vect_location
,
5155 "=== vectorizable_assignment ===\n");
5157 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
5162 if (dump_enabled_p ())
5163 dump_printf_loc (MSG_NOTE
, vect_location
, "transform assignment.\n");
5166 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5169 for (j
= 0; j
< ncopies
; j
++)
5173 vect_get_vec_defs (op
, NULL
, stmt
, &vec_oprnds
, NULL
, slp_node
);
5175 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds
, NULL
);
5177 /* Arguments are ready. create the new vector stmt. */
5178 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
5180 if (CONVERT_EXPR_CODE_P (code
)
5181 || code
== VIEW_CONVERT_EXPR
)
5182 vop
= build1 (VIEW_CONVERT_EXPR
, vectype
, vop
);
5183 new_stmt
= gimple_build_assign (vec_dest
, vop
);
5184 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5185 gimple_assign_set_lhs (new_stmt
, new_temp
);
5186 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5188 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5195 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5197 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5199 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5202 vec_oprnds
.release ();
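/* Illustrative source loop (not part of the vectorizer) that reaches
   vectorizable_assignment: a conversion that changes neither the lane
   count nor the vector size is emitted as a single VIEW_CONVERT_EXPR copy
   per vector.  The types are example values only.  */
#if 0
void
copy_as_unsigned (const int *src, unsigned int *dst, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = (unsigned int) src[i];	/* same size, same lane count */
}
#endif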
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */
5211 vect_supportable_shift (enum tree_code code
, tree scalar_type
)
5214 machine_mode vec_mode
;
5219 vectype
= get_vectype_for_scalar_type (scalar_type
);
5223 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
5225 || optab_handler (optab
, TYPE_MODE (vectype
)) == CODE_FOR_nothing
)
5227 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5229 || (optab_handler (optab
, TYPE_MODE (vectype
))
5230 == CODE_FOR_nothing
))
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
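/* Illustrative scalar loops (not part of the vectorizer) for the two shift
   flavours probed above; which optab is actually available depends on the
   target.  */
#if 0
void
shift_examples (int *a, const int *amt, int n, int k)
{
  for (int i = 0; i < n; i++)
    a[i] <<= k;		/* invariant amount: vector-by-scalar (optab_scalar) */
  for (int i = 0; i < n; i++)
    a[i] <<= amt[i];	/* per-lane amount: vector-by-vector (optab_vector)  */
}
#endif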
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
5251 vectorizable_shift (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
5252 gimple
**vec_stmt
, slp_tree slp_node
)
5256 tree op0
, op1
= NULL
;
5257 tree vec_oprnd1
= NULL_TREE
;
5258 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5260 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5261 enum tree_code code
;
5262 machine_mode vec_mode
;
5266 machine_mode optab_op2_mode
;
5268 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
5270 gimple
*new_stmt
= NULL
;
5271 stmt_vec_info prev_stmt_info
;
5272 poly_uint64 nunits_in
;
5273 poly_uint64 nunits_out
;
5278 vec
<tree
> vec_oprnds0
= vNULL
;
5279 vec
<tree
> vec_oprnds1
= vNULL
;
5282 bool scalar_shift_arg
= true;
5283 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5284 vec_info
*vinfo
= stmt_info
->vinfo
;
5286 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5289 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5293 /* Is STMT a vectorizable binary/unary operation? */
5294 if (!is_gimple_assign (stmt
))
5297 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
5300 code
= gimple_assign_rhs_code (stmt
);
5302 if (!(code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
5303 || code
== RROTATE_EXPR
))
5306 scalar_dest
= gimple_assign_lhs (stmt
);
5307 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
5308 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
)))
5310 if (dump_enabled_p ())
5311 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5312 "bit-precision shifts not supported.\n");
5316 op0
= gimple_assign_rhs1 (stmt
);
5317 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
5319 if (dump_enabled_p ())
5320 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5321 "use not simple.\n");
5324 /* If op0 is an external or constant def use a vector type with
5325 the same size as the output vector type. */
5327 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
5329 gcc_assert (vectype
);
5332 if (dump_enabled_p ())
5333 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5334 "no vectype for scalar type\n");
5338 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
5339 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
5340 if (maybe_ne (nunits_out
, nunits_in
))
5343 op1
= gimple_assign_rhs2 (stmt
);
5344 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &op1_vectype
))
5346 if (dump_enabled_p ())
5347 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5348 "use not simple.\n");
5352 /* Multiple types in SLP are handled by creating the appropriate number of
5353 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5358 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5360 gcc_assert (ncopies
>= 1);
5362 /* Determine whether the shift amount is a vector, or scalar. If the
5363 shift/rotate amount is a vector, use the vector/vector shift optabs. */
5365 if ((dt
[1] == vect_internal_def
5366 || dt
[1] == vect_induction_def
)
5368 scalar_shift_arg
= false;
5369 else if (dt
[1] == vect_constant_def
5370 || dt
[1] == vect_external_def
5371 || dt
[1] == vect_internal_def
)
5373 /* In SLP, need to check whether the shift count is the same,
5374 in loops if it is a constant or invariant, it is always
5378 vec
<gimple
*> stmts
= SLP_TREE_SCALAR_STMTS (slp_node
);
5381 FOR_EACH_VEC_ELT (stmts
, k
, slpstmt
)
5382 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt
), op1
, 0))
5383 scalar_shift_arg
= false;
5386 /* If the shift amount is computed by a pattern stmt we cannot
5387 use the scalar amount directly thus give up and use a vector
5389 if (dt
[1] == vect_internal_def
)
5391 gimple
*def
= SSA_NAME_DEF_STMT (op1
);
5392 if (is_pattern_stmt_p (vinfo_for_stmt (def
)))
5393 scalar_shift_arg
= false;
5398 if (dump_enabled_p ())
5399 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5400 "operand mode requires invariant argument.\n");
5404 /* Vector shifted by vector. */
5405 if (!scalar_shift_arg
)
5407 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5408 if (dump_enabled_p ())
5409 dump_printf_loc (MSG_NOTE
, vect_location
,
5410 "vector/vector shift/rotate found.\n");
5413 op1_vectype
= get_same_sized_vectype (TREE_TYPE (op1
), vectype_out
);
5414 if (op1_vectype
== NULL_TREE
5415 || TYPE_MODE (op1_vectype
) != TYPE_MODE (vectype
))
5417 if (dump_enabled_p ())
5418 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5419 "unusable type for last operand in"
5420 " vector/vector shift/rotate.\n");
5424 /* See if the machine has a vector shifted by scalar insn and if not
5425 then see if it has a vector shifted by vector insn. */
5428 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
5430 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
5432 if (dump_enabled_p ())
5433 dump_printf_loc (MSG_NOTE
, vect_location
,
5434 "vector/scalar shift/rotate found.\n");
5438 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5440 && (optab_handler (optab
, TYPE_MODE (vectype
))
5441 != CODE_FOR_nothing
))
5443 scalar_shift_arg
= false;
5445 if (dump_enabled_p ())
5446 dump_printf_loc (MSG_NOTE
, vect_location
,
5447 "vector/vector shift/rotate found.\n");
5449 /* Unlike the other binary operators, shifts/rotates have
5450 the rhs being int, instead of the same type as the lhs,
5451 so make sure the scalar is the right type if we are
5452 dealing with vectors of long long/long/short/char. */
5453 if (dt
[1] == vect_constant_def
)
5454 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5455 else if (!useless_type_conversion_p (TREE_TYPE (vectype
),
5459 && TYPE_MODE (TREE_TYPE (vectype
))
5460 != TYPE_MODE (TREE_TYPE (op1
)))
5462 if (dump_enabled_p ())
5463 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5464 "unusable type for last operand in"
5465 " vector/vector shift/rotate.\n");
5468 if (vec_stmt
&& !slp_node
)
5470 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5471 op1
= vect_init_vector (stmt
, op1
,
5472 TREE_TYPE (vectype
), NULL
);
5479 /* Supportable by target? */
5482 if (dump_enabled_p ())
5483 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5487 vec_mode
= TYPE_MODE (vectype
);
5488 icode
= (int) optab_handler (optab
, vec_mode
);
5489 if (icode
== CODE_FOR_nothing
)
5491 if (dump_enabled_p ())
5492 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5493 "op not supported by target.\n");
5494 /* Check only during analysis. */
5495 if (maybe_ne (GET_MODE_SIZE (vec_mode
), UNITS_PER_WORD
)
5497 && !vect_worthwhile_without_simd_p (vinfo
, code
)))
5499 if (dump_enabled_p ())
5500 dump_printf_loc (MSG_NOTE
, vect_location
,
5501 "proceeding using word mode.\n");
5504 /* Worthwhile without SIMD support? Check only during analysis. */
5506 && !VECTOR_MODE_P (TYPE_MODE (vectype
))
5507 && !vect_worthwhile_without_simd_p (vinfo
, code
))
5509 if (dump_enabled_p ())
5510 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5511 "not worthwhile without SIMD support.\n");
5515 if (!vec_stmt
) /* transformation not required. */
5517 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
5518 if (dump_enabled_p ())
5519 dump_printf_loc (MSG_NOTE
, vect_location
,
5520 "=== vectorizable_shift ===\n");
5522 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
5528 if (dump_enabled_p ())
5529 dump_printf_loc (MSG_NOTE
, vect_location
,
5530 "transform binary/unary operation.\n");
5533 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5535 prev_stmt_info
= NULL
;
5536 for (j
= 0; j
< ncopies
; j
++)
5541 if (scalar_shift_arg
)
5543 /* Vector shl and shr insn patterns can be defined with scalar
5544 operand 2 (shift operand). In this case, use constant or loop
5545 invariant op1 directly, without extending it to vector mode
5547 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
5548 if (!VECTOR_MODE_P (optab_op2_mode
))
5550 if (dump_enabled_p ())
5551 dump_printf_loc (MSG_NOTE
, vect_location
,
5552 "operand 1 using scalar mode.\n");
5554 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
5555 vec_oprnds1
.quick_push (vec_oprnd1
);
5558 /* Store vec_oprnd1 for every vector stmt to be created
5559 for SLP_NODE. We check during the analysis that all
5560 the shift arguments are the same.
5561 TODO: Allow different constants for different vector
5562 stmts generated for an SLP instance. */
5563 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
5564 vec_oprnds1
.quick_push (vec_oprnd1
);
5569 /* vec_oprnd1 is available if operand 1 should be of a scalar-type
5570 (a special case for certain kind of vector shifts); otherwise,
5571 operand 1 should be of a vector type (the usual case). */
5573 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
5576 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5580 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
5582 /* Arguments are ready. Create the new vector stmt. */
5583 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5585 vop1
= vec_oprnds1
[i
];
5586 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
5587 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5588 gimple_assign_set_lhs (new_stmt
, new_temp
);
5589 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5591 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5598 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5600 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5601 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
5604 vec_oprnds0
.release ();
5605 vec_oprnds1
.release ();
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
5620 vectorizable_operation (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
5621 gimple
**vec_stmt
, slp_tree slp_node
)
5625 tree op0
, op1
= NULL_TREE
, op2
= NULL_TREE
;
5626 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
5628 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5629 enum tree_code code
, orig_code
;
5630 machine_mode vec_mode
;
5634 bool target_support_p
;
5636 enum vect_def_type dt
[3]
5637 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
5639 gimple
*new_stmt
= NULL
;
5640 stmt_vec_info prev_stmt_info
;
5641 poly_uint64 nunits_in
;
5642 poly_uint64 nunits_out
;
5646 vec
<tree
> vec_oprnds0
= vNULL
;
5647 vec
<tree
> vec_oprnds1
= vNULL
;
5648 vec
<tree
> vec_oprnds2
= vNULL
;
5649 tree vop0
, vop1
, vop2
;
5650 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5651 vec_info
*vinfo
= stmt_info
->vinfo
;
5653 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5656 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5660 /* Is STMT a vectorizable binary/unary operation? */
5661 if (!is_gimple_assign (stmt
))
5664 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
5667 orig_code
= code
= gimple_assign_rhs_code (stmt
);
5669 /* For pointer addition and subtraction, we should use the normal
5670 plus and minus for the vector operation. */
5671 if (code
== POINTER_PLUS_EXPR
)
5673 if (code
== POINTER_DIFF_EXPR
)
5676 /* Support only unary or binary operations. */
5677 op_type
= TREE_CODE_LENGTH (code
);
5678 if (op_type
!= unary_op
&& op_type
!= binary_op
&& op_type
!= ternary_op
)
5680 if (dump_enabled_p ())
5681 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5682 "num. args = %d (not unary/binary/ternary op).\n",
5687 scalar_dest
= gimple_assign_lhs (stmt
);
5688 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
5690 /* Most operations cannot handle bit-precision types without extra
5692 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
5693 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
5694 /* Exception are bitwise binary operations. */
5695 && code
!= BIT_IOR_EXPR
5696 && code
!= BIT_XOR_EXPR
5697 && code
!= BIT_AND_EXPR
)
5699 if (dump_enabled_p ())
5700 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5701 "bit-precision arithmetic not supported.\n");
5705 op0
= gimple_assign_rhs1 (stmt
);
5706 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype
))
5708 if (dump_enabled_p ())
5709 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5710 "use not simple.\n");
5713 /* If op0 is an external or constant def use a vector type with
5714 the same size as the output vector type. */
5717 /* For boolean type we cannot determine vectype by
5718 invariant value (don't know whether it is a vector
5719 of booleans or vector of integers). We use output
5720 vectype because operations on boolean don't change
5722 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0
)))
5724 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest
)))
5726 if (dump_enabled_p ())
5727 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5728 "not supported operation on bool value.\n");
5731 vectype
= vectype_out
;
5734 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
5737 gcc_assert (vectype
);
5740 if (dump_enabled_p ())
5742 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5743 "no vectype for scalar type ");
5744 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
5746 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
5752 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
5753 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
5754 if (maybe_ne (nunits_out
, nunits_in
))
5757 if (op_type
== binary_op
|| op_type
== ternary_op
)
5759 op1
= gimple_assign_rhs2 (stmt
);
5760 if (!vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]))
5762 if (dump_enabled_p ())
5763 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5764 "use not simple.\n");
5768 if (op_type
== ternary_op
)
5770 op2
= gimple_assign_rhs3 (stmt
);
5771 if (!vect_is_simple_use (op2
, vinfo
, &def_stmt
, &dt
[2]))
5773 if (dump_enabled_p ())
5774 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5775 "use not simple.\n");
5780 /* Multiple types in SLP are handled by creating the appropriate number of
5781 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
5786 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5788 gcc_assert (ncopies
>= 1);
5790 /* Shifts are handled in vectorizable_shift (). */
5791 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
5792 || code
== RROTATE_EXPR
)
5795 /* Supportable by target? */
5797 vec_mode
= TYPE_MODE (vectype
);
5798 if (code
== MULT_HIGHPART_EXPR
)
5799 target_support_p
= can_mult_highpart_p (vec_mode
, TYPE_UNSIGNED (vectype
));
5802 optab
= optab_for_tree_code (code
, vectype
, optab_default
);
5805 if (dump_enabled_p ())
5806 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5810 target_support_p
= (optab_handler (optab
, vec_mode
)
5811 != CODE_FOR_nothing
);
5814 if (!target_support_p
)
5816 if (dump_enabled_p ())
5817 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5818 "op not supported by target.\n");
5819 /* Check only during analysis. */
5820 if (maybe_ne (GET_MODE_SIZE (vec_mode
), UNITS_PER_WORD
)
5821 || (!vec_stmt
&& !vect_worthwhile_without_simd_p (vinfo
, code
)))
5823 if (dump_enabled_p ())
5824 dump_printf_loc (MSG_NOTE
, vect_location
,
5825 "proceeding using word mode.\n");
5828 /* Worthwhile without SIMD support? Check only during analysis. */
5829 if (!VECTOR_MODE_P (vec_mode
)
5831 && !vect_worthwhile_without_simd_p (vinfo
, code
))
5833 if (dump_enabled_p ())
5834 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5835 "not worthwhile without SIMD support.\n");
5839 if (!vec_stmt
) /* transformation not required. */
5841 STMT_VINFO_TYPE (stmt_info
) = op_vec_info_type
;
5842 if (dump_enabled_p ())
5843 dump_printf_loc (MSG_NOTE
, vect_location
,
5844 "=== vectorizable_operation ===\n");
5846 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, NULL
, NULL
);
5852 if (dump_enabled_p ())
5853 dump_printf_loc (MSG_NOTE
, vect_location
,
5854 "transform binary/unary operation.\n");
5857 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5859 /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
5860 vectors with unsigned elements, but the result is signed. So, we
5861 need to compute the MINUS_EXPR into vectype temporary and
5862 VIEW_CONVERT_EXPR it into the final vectype_out result. */
5863 tree vec_cvt_dest
= NULL_TREE
;
5864 if (orig_code
== POINTER_DIFF_EXPR
)
5865 vec_cvt_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to
     generate more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
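  /* A minimal numeric sketch of the unrolling in the comment above,
     assuming VF = 16 and nunits = 4 as in the example: the statement is
     replicated ncopies = VF / nunits times, and the copies are chained
     through STMT_VINFO_RELATED_STMT.  */
#if 0
  {
    unsigned int example_vf = 16;
    unsigned int example_nunits = 4;
    unsigned int example_ncopies = example_vf / example_nunits;	/* == 4 */
    (void) example_ncopies;
  }
#endif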
5920 prev_stmt_info
= NULL
;
5921 for (j
= 0; j
< ncopies
; j
++)
5926 if (op_type
== binary_op
)
5927 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5929 else if (op_type
== ternary_op
)
5933 auto_vec
<tree
> ops(3);
5934 ops
.quick_push (op0
);
5935 ops
.quick_push (op1
);
5936 ops
.quick_push (op2
);
5937 auto_vec
<vec
<tree
> > vec_defs(3);
5938 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
5939 vec_oprnds0
= vec_defs
[0];
5940 vec_oprnds1
= vec_defs
[1];
5941 vec_oprnds2
= vec_defs
[2];
5945 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
, &vec_oprnds1
,
5947 vect_get_vec_defs (op2
, NULL_TREE
, stmt
, &vec_oprnds2
, NULL
,
5952 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
5957 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, &vec_oprnds1
);
5958 if (op_type
== ternary_op
)
5960 tree vec_oprnd
= vec_oprnds2
.pop ();
5961 vec_oprnds2
.quick_push (vect_get_vec_def_for_stmt_copy (dt
[2],
5966 /* Arguments are ready. Create the new vector stmt. */
5967 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5969 vop1
= ((op_type
== binary_op
|| op_type
== ternary_op
)
5970 ? vec_oprnds1
[i
] : NULL_TREE
);
5971 vop2
= ((op_type
== ternary_op
)
5972 ? vec_oprnds2
[i
] : NULL_TREE
);
5973 new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
, vop2
);
5974 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5975 gimple_assign_set_lhs (new_stmt
, new_temp
);
5976 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5979 new_temp
= build1 (VIEW_CONVERT_EXPR
, vectype_out
, new_temp
);
5980 new_stmt
= gimple_build_assign (vec_cvt_dest
, VIEW_CONVERT_EXPR
,
5982 new_temp
= make_ssa_name (vec_cvt_dest
, new_stmt
);
5983 gimple_assign_set_lhs (new_stmt
, new_temp
);
5984 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
5987 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
5994 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
5996 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
5997 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6000 vec_oprnds0
.release ();
6001 vec_oprnds1
.release ();
6002 vec_oprnds2
.release ();
6007 /* A helper function to ensure data reference DR's base alignment. */
6010 ensure_base_align (struct data_reference
*dr
)
6015 if (DR_VECT_AUX (dr
)->base_misaligned
)
6017 tree base_decl
= DR_VECT_AUX (dr
)->base_decl
;
6019 unsigned int align_base_to
= DR_TARGET_ALIGNMENT (dr
) * BITS_PER_UNIT
;
6021 if (decl_in_symtab_p (base_decl
))
6022 symtab_node::get (base_decl
)->increase_alignment (align_base_to
);
6025 SET_DECL_ALIGN (base_decl
, align_base_to
);
6026 DECL_USER_ALIGN (base_decl
) = 1;
6028 DR_VECT_AUX (dr
)->base_misaligned
= false;
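/* Illustrative example (assumed, not part of the code above): if the chosen
   vector mode wants a DR_TARGET_ALIGNMENT of 16 bytes for accesses to

     static int a[256];    -- DECL_ALIGN currently 32 bits

   ensure_base_align raises the alignment of the base declaration to 128 bits,
   either through symtab_node::increase_alignment for symbols in the symbol
   table or through SET_DECL_ALIGN, and sets DECL_USER_ALIGN so the increased
   alignment is not reduced later.  */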
6033 /* Function get_group_alias_ptr_type.
6035 Return the alias type for the group starting at FIRST_STMT. */
6038 get_group_alias_ptr_type (gimple
*first_stmt
)
6040 struct data_reference
*first_dr
, *next_dr
;
6043 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
6044 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt
));
6047 next_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt
));
6048 if (get_alias_set (DR_REF (first_dr
))
6049 != get_alias_set (DR_REF (next_dr
)))
6051 if (dump_enabled_p ())
6052 dump_printf_loc (MSG_NOTE
, vect_location
,
6053 "conflicting alias set types.\n");
6054 return ptr_type_node
;
6056 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6058 return reference_alias_ptr_type (DR_REF (first_dr
));
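/* Illustrative note (assumed example): if every member of the group accesses
   the memory through the same "int" reference type, the alias pointer type of
   the first DR_REF is returned and the precise alias set is kept.  If one
   member accesses the same memory through a differently typed reference
   (e.g. through a union member), the alias sets of the DR_REFs conflict and
   the function falls back to ptr_type_node, treating the whole group as
   aliasing anything.  */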
6062 /* Function vectorizable_store.
   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise. */
6071 vectorizable_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6076 tree vec_oprnd
= NULL_TREE
;
6077 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6078 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6080 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6081 struct loop
*loop
= NULL
;
6082 machine_mode vec_mode
;
6084 enum dr_alignment_support alignment_support_scheme
;
6086 enum vect_def_type rhs_dt
= vect_unknown_def_type
;
6087 enum vect_def_type mask_dt
= vect_unknown_def_type
;
6088 stmt_vec_info prev_stmt_info
= NULL
;
6089 tree dataref_ptr
= NULL_TREE
;
6090 tree dataref_offset
= NULL_TREE
;
6091 gimple
*ptr_incr
= NULL
;
6094 gimple
*next_stmt
, *first_stmt
;
6096 unsigned int group_size
, i
;
6097 vec
<tree
> oprnds
= vNULL
;
6098 vec
<tree
> result_chain
= vNULL
;
6100 tree offset
= NULL_TREE
;
6101 vec
<tree
> vec_oprnds
= vNULL
;
6102 bool slp
= (slp_node
!= NULL
);
6103 unsigned int vec_num
;
6104 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6105 vec_info
*vinfo
= stmt_info
->vinfo
;
6107 gather_scatter_info gs_info
;
6110 vec_load_store_type vls_type
;
6113 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6116 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
6120 /* Is vectorizable store? */
6122 tree mask
= NULL_TREE
, mask_vectype
= NULL_TREE
;
6123 if (is_gimple_assign (stmt
))
6125 tree scalar_dest
= gimple_assign_lhs (stmt
);
6126 if (TREE_CODE (scalar_dest
) == VIEW_CONVERT_EXPR
6127 && is_pattern_stmt_p (stmt_info
))
6128 scalar_dest
= TREE_OPERAND (scalar_dest
, 0);
6129 if (TREE_CODE (scalar_dest
) != ARRAY_REF
6130 && TREE_CODE (scalar_dest
) != BIT_FIELD_REF
6131 && TREE_CODE (scalar_dest
) != INDIRECT_REF
6132 && TREE_CODE (scalar_dest
) != COMPONENT_REF
6133 && TREE_CODE (scalar_dest
) != IMAGPART_EXPR
6134 && TREE_CODE (scalar_dest
) != REALPART_EXPR
6135 && TREE_CODE (scalar_dest
) != MEM_REF
)
6140 gcall
*call
= dyn_cast
<gcall
*> (stmt
);
6141 if (!call
|| !gimple_call_internal_p (call
))
6144 internal_fn ifn
= gimple_call_internal_fn (call
);
6145 if (!internal_store_fn_p (ifn
))
6148 if (slp_node
!= NULL
)
6150 if (dump_enabled_p ())
6151 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6152 "SLP of masked stores not supported.\n");
6156 int mask_index
= internal_fn_mask_index (ifn
);
6157 if (mask_index
>= 0)
6159 mask
= gimple_call_arg (call
, mask_index
);
6160 if (!vect_check_load_store_mask (stmt
, mask
, &mask_dt
,
6166 op
= vect_get_store_rhs (stmt
);
6168 /* Cannot have hybrid store SLP -- that would mean storing to the
6169 same location twice. */
6170 gcc_assert (slp
== PURE_SLP_STMT (stmt_info
));
6172 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
), rhs_vectype
= NULL_TREE
;
6173 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6177 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6178 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6183 /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
6193 /* FORNOW. This restriction should be relaxed. */
6194 if (loop
&& nested_in_vect_loop_p (loop
, stmt
) && ncopies
> 1)
6196 if (dump_enabled_p ())
6197 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6198 "multiple types in nested loop.\n");
6202 if (!vect_check_store_rhs (stmt
, op
, &rhs_dt
, &rhs_vectype
, &vls_type
))
6205 elem_type
= TREE_TYPE (vectype
);
6206 vec_mode
= TYPE_MODE (vectype
);
6208 if (!STMT_VINFO_DATA_REF (stmt_info
))
6211 vect_memory_access_type memory_access_type
;
6212 if (!get_load_store_type (stmt
, vectype
, slp
, mask
, vls_type
, ncopies
,
6213 &memory_access_type
, &gs_info
))
6218 if (memory_access_type
== VMAT_CONTIGUOUS
)
6220 if (!VECTOR_MODE_P (vec_mode
)
6221 || !can_vec_mask_load_store_p (vec_mode
,
6222 TYPE_MODE (mask_vectype
), false))
6225 else if (memory_access_type
!= VMAT_LOAD_STORE_LANES
6226 && (memory_access_type
!= VMAT_GATHER_SCATTER
|| gs_info
.decl
))
6228 if (dump_enabled_p ())
6229 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
6230 "unsupported access type for masked store.\n");
6236 /* FORNOW. In some cases can vectorize even if data-type not supported
6237 (e.g. - array initialization with 0). */
6238 if (optab_handler (mov_optab
, vec_mode
) == CODE_FOR_nothing
)
6242 grouped_store
= (STMT_VINFO_GROUPED_ACCESS (stmt_info
)
6243 && memory_access_type
!= VMAT_GATHER_SCATTER
6244 && (slp
|| memory_access_type
!= VMAT_CONTIGUOUS
));
6247 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6248 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
6249 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
6255 group_size
= vec_num
= 1;
6258 if (!vec_stmt
) /* transformation not required. */
6260 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
6263 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
))
6264 check_load_store_masking (loop_vinfo
, vectype
, vls_type
, group_size
,
6265 memory_access_type
, &gs_info
);
6267 STMT_VINFO_TYPE (stmt_info
) = store_vec_info_type
;
6268 /* The SLP costs are calculated during SLP analysis. */
6270 vect_model_store_cost (stmt_info
, ncopies
, memory_access_type
,
6271 vls_type
, NULL
, NULL
, NULL
);
6274 gcc_assert (memory_access_type
== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
6278 ensure_base_align (dr
);
6280 if (memory_access_type
== VMAT_GATHER_SCATTER
&& gs_info
.decl
)
6282 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
, src
;
6283 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
6284 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
6285 tree ptr
, mask
, var
, scale
, perm_mask
= NULL_TREE
;
6286 edge pe
= loop_preheader_edge (loop
);
6289 enum { NARROW
, NONE
, WIDEN
} modifier
;
6290 poly_uint64 scatter_off_nunits
6291 = TYPE_VECTOR_SUBPARTS (gs_info
.offset_vectype
);
6293 if (known_eq (nunits
, scatter_off_nunits
))
6295 else if (known_eq (nunits
* 2, scatter_off_nunits
))
6299 /* Currently gathers and scatters are only supported for
6300 fixed-length vectors. */
6301 unsigned int count
= scatter_off_nunits
.to_constant ();
6302 vec_perm_builder
sel (count
, count
, 1);
6303 for (i
= 0; i
< (unsigned int) count
; ++i
)
6304 sel
.quick_push (i
| (count
/ 2));
6306 vec_perm_indices
indices (sel
, 1, count
);
6307 perm_mask
= vect_gen_perm_mask_checked (gs_info
.offset_vectype
,
6309 gcc_assert (perm_mask
!= NULL_TREE
);
6311 else if (known_eq (nunits
, scatter_off_nunits
* 2))
6315 /* Currently gathers and scatters are only supported for
6316 fixed-length vectors. */
6317 unsigned int count
= nunits
.to_constant ();
6318 vec_perm_builder
sel (count
, count
, 1);
6319 for (i
= 0; i
< (unsigned int) count
; ++i
)
6320 sel
.quick_push (i
| (count
/ 2));
6322 vec_perm_indices
indices (sel
, 2, count
);
6323 perm_mask
= vect_gen_perm_mask_checked (vectype
, indices
);
6324 gcc_assert (perm_mask
!= NULL_TREE
);
6330 rettype
= TREE_TYPE (TREE_TYPE (gs_info
.decl
));
6331 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6332 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6333 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6334 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
6335 scaletype
= TREE_VALUE (arglist
);
6337 gcc_checking_assert (TREE_CODE (masktype
) == INTEGER_TYPE
6338 && TREE_CODE (rettype
) == VOID_TYPE
);
6340 ptr
= fold_convert (ptrtype
, gs_info
.base
);
6341 if (!is_gimple_min_invariant (ptr
))
6343 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
6344 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
6345 gcc_assert (!new_bb
);
6348 /* Currently we support only unconditional scatter stores,
6349 so mask should be all ones. */
6350 mask
= build_int_cst (masktype
, -1);
6351 mask
= vect_init_vector (stmt
, mask
, masktype
, NULL
);
6353 scale
= build_int_cst (scaletype
, gs_info
.scale
);
6355 prev_stmt_info
= NULL
;
6356 for (j
= 0; j
< ncopies
; ++j
)
6361 = vect_get_vec_def_for_operand (op
, stmt
);
6363 = vect_get_vec_def_for_operand (gs_info
.offset
, stmt
);
6365 else if (modifier
!= NONE
&& (j
& 1))
6367 if (modifier
== WIDEN
)
6370 = vect_get_vec_def_for_stmt_copy (rhs_dt
, vec_oprnd1
);
6371 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
, perm_mask
,
6374 else if (modifier
== NARROW
)
6376 src
= permute_vec_elements (vec_oprnd1
, vec_oprnd1
, perm_mask
,
6379 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
6388 = vect_get_vec_def_for_stmt_copy (rhs_dt
, vec_oprnd1
);
6390 = vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
6394 if (!useless_type_conversion_p (srctype
, TREE_TYPE (src
)))
6396 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src
)),
6397 TYPE_VECTOR_SUBPARTS (srctype
)));
6398 var
= vect_get_new_ssa_name (srctype
, vect_simple_var
);
6399 src
= build1 (VIEW_CONVERT_EXPR
, srctype
, src
);
6400 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, src
);
6401 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6405 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
6407 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
)),
6408 TYPE_VECTOR_SUBPARTS (idxtype
)));
6409 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
6410 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
6411 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
6412 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6417 = gimple_build_call (gs_info
.decl
, 5, ptr
, mask
, op
, src
, scale
);
6419 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6421 if (prev_stmt_info
== NULL
)
6422 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
6424 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
6425 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
6430 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
6432 gimple
*group_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
6433 GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt
))++;
6439 gcc_assert (!loop
|| !nested_in_vect_loop_p (loop
, stmt
));
6441 /* We vectorize all the stmts of the interleaving group when we
6442 reach the last stmt in the group. */
6443 if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt
))
6444 < GROUP_SIZE (vinfo_for_stmt (first_stmt
))
6453 grouped_store
= false;
6454 /* VEC_NUM is the number of vect stmts to be created for this
6456 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6457 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
6458 gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt
)) == first_stmt
);
6459 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
6460 op
= vect_get_store_rhs (first_stmt
);
6463 /* VEC_NUM is the number of vect stmts to be created for this
6465 vec_num
= group_size
;
6467 ref_type
= get_group_alias_ptr_type (first_stmt
);
6470 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
6472 if (dump_enabled_p ())
6473 dump_printf_loc (MSG_NOTE
, vect_location
,
6474 "transform store. ncopies = %d\n", ncopies
);
6476 if (memory_access_type
== VMAT_ELEMENTWISE
6477 || memory_access_type
== VMAT_STRIDED_SLP
)
6479 gimple_stmt_iterator incr_gsi
;
6485 tree stride_base
, stride_step
, alias_off
;
6488 /* Checked by get_load_store_type. */
6489 unsigned int const_nunits
= nunits
.to_constant ();
6491 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
));
6492 gcc_assert (!nested_in_vect_loop_p (loop
, stmt
));
6495 = fold_build_pointer_plus
6496 (DR_BASE_ADDRESS (first_dr
),
6497 size_binop (PLUS_EXPR
,
6498 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
6499 convert_to_ptrofftype (DR_INIT (first_dr
))));
6500 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             array[i] = ...;

         we generate a new induction variable and new stores from
         the components of the (vectorized) rhs:

           for (j = 0; ; j += VF*stride)
             vectemp = ...;
             tmp1 = vectemp[0];
             array[j] = tmp1;
             tmp2 = vectemp[1];
             array[j + stride] = tmp2;
             ...  */
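      /* As a concrete illustration (assumed values, not from the original
         comment): with V4SI vectors and a run-time stride s, each vector
         value computed for the rhs is split into nstores = 4 element stores

           array[j + 0*s] = vectemp[0];
           array[j + 1*s] = vectemp[1];
           array[j + 2*s] = vectemp[2];
           array[j + 3*s] = vectemp[3];

         driven by the induction variable created below; for VMAT_STRIDED_SLP
         the pieces may instead be sub-vectors or same-sized integers, as the
         ltype/lvectype selection that follows decides.  */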
6520 unsigned nstores
= const_nunits
;
6522 tree ltype
= elem_type
;
6523 tree lvectype
= vectype
;
6526 if (group_size
< const_nunits
6527 && const_nunits
% group_size
== 0)
6529 nstores
= const_nunits
/ group_size
;
6531 ltype
= build_vector_type (elem_type
, group_size
);
6534 /* First check if vec_extract optab doesn't support extraction
6535 of vector elts directly. */
6536 scalar_mode elmode
= SCALAR_TYPE_MODE (elem_type
);
6538 if (!mode_for_vector (elmode
, group_size
).exists (&vmode
)
6539 || !VECTOR_MODE_P (vmode
)
6540 || !targetm
.vector_mode_supported_p (vmode
)
6541 || (convert_optab_handler (vec_extract_optab
,
6542 TYPE_MODE (vectype
), vmode
)
6543 == CODE_FOR_nothing
))
6545 /* Try to avoid emitting an extract of vector elements
6546 by performing the extracts using an integer type of the
6547 same size, extracting from a vector of those and then
                     re-interpreting it as the original vector type if
                     supported.  */
                  unsigned lsize
                    = group_size * GET_MODE_BITSIZE (elmode);
6552 elmode
= int_mode_for_size (lsize
, 0).require ();
6553 unsigned int lnunits
= const_nunits
/ group_size
;
6554 /* If we can't construct such a vector fall back to
6555 element extracts from the original vector type and
6556 element size stores. */
6557 if (mode_for_vector (elmode
, lnunits
).exists (&vmode
)
6558 && VECTOR_MODE_P (vmode
)
6559 && targetm
.vector_mode_supported_p (vmode
)
6560 && (convert_optab_handler (vec_extract_optab
,
6562 != CODE_FOR_nothing
))
6566 ltype
= build_nonstandard_integer_type (lsize
, 1);
6567 lvectype
= build_vector_type (ltype
, nstores
);
6569 /* Else fall back to vector extraction anyway.
6570 Fewer stores are more important than avoiding spilling
6571 of the vector we extract from. Compared to the
6572 construction case in vectorizable_load no store-forwarding
6573 issue exists here for reasonable archs. */
6576 else if (group_size
>= const_nunits
6577 && group_size
% const_nunits
== 0)
6580 lnel
= const_nunits
;
6584 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (elem_type
));
6585 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
6588 ivstep
= stride_step
;
6589 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (ivstep
), ivstep
,
6590 build_int_cst (TREE_TYPE (ivstep
), vf
));
6592 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
6594 stride_base
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_base
);
6595 ivstep
= cse_and_gimplify_to_preheader (loop_vinfo
, ivstep
);
6596 create_iv (stride_base
, ivstep
, NULL
,
6597 loop
, &incr_gsi
, insert_after
,
6599 incr
= gsi_stmt (incr_gsi
);
6600 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
6602 stride_step
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_step
);
6604 prev_stmt_info
= NULL
;
6605 alias_off
= build_int_cst (ref_type
, 0);
6606 next_stmt
= first_stmt
;
6607 for (g
= 0; g
< group_size
; g
++)
6609 running_off
= offvar
;
6612 tree size
= TYPE_SIZE_UNIT (ltype
);
6613 tree pos
= fold_build2 (MULT_EXPR
, sizetype
, size_int (g
),
6615 tree newoff
= copy_ssa_name (running_off
, NULL
);
6616 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6618 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6619 running_off
= newoff
;
6621 unsigned int group_el
= 0;
6622 unsigned HOST_WIDE_INT
6623 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
6624 for (j
= 0; j
< ncopies
; j
++)
6626 /* We've set op and dt above, from vect_get_store_rhs,
6627 and first_stmt == stmt. */
6632 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
, NULL
,
6634 vec_oprnd
= vec_oprnds
[0];
6638 op
= vect_get_store_rhs (next_stmt
);
6639 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6645 vec_oprnd
= vec_oprnds
[j
];
6648 vect_is_simple_use (op
, vinfo
, &def_stmt
, &rhs_dt
);
6649 vec_oprnd
= vect_get_vec_def_for_stmt_copy (rhs_dt
,
6653 /* Pun the vector to extract from if necessary. */
6654 if (lvectype
!= vectype
)
6656 tree tem
= make_ssa_name (lvectype
);
6658 = gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
6659 lvectype
, vec_oprnd
));
6660 vect_finish_stmt_generation (stmt
, pun
, gsi
);
6663 for (i
= 0; i
< nstores
; i
++)
6665 tree newref
, newoff
;
6666 gimple
*incr
, *assign
;
6667 tree size
= TYPE_SIZE (ltype
);
6668 /* Extract the i'th component. */
6669 tree pos
= fold_build2 (MULT_EXPR
, bitsizetype
,
6670 bitsize_int (i
), size
);
6671 tree elem
= fold_build3 (BIT_FIELD_REF
, ltype
, vec_oprnd
,
6674 elem
= force_gimple_operand_gsi (gsi
, elem
, true,
6678 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
6680 newref
= build2 (MEM_REF
, ltype
,
6681 running_off
, this_off
);
6682 vect_copy_ref_info (newref
, DR_REF (first_dr
));
6684 /* And store it to *running_off. */
6685 assign
= gimple_build_assign (newref
, elem
);
6686 vect_finish_stmt_generation (stmt
, assign
, gsi
);
6690 || group_el
== group_size
)
6692 newoff
= copy_ssa_name (running_off
, NULL
);
6693 incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
6694 running_off
, stride_step
);
6695 vect_finish_stmt_generation (stmt
, incr
, gsi
);
6697 running_off
= newoff
;
6700 if (g
== group_size
- 1
6703 if (j
== 0 && i
== 0)
6704 STMT_VINFO_VEC_STMT (stmt_info
)
6705 = *vec_stmt
= assign
;
6707 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = assign
;
6708 prev_stmt_info
= vinfo_for_stmt (assign
);
6712 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6717 vec_oprnds
.release ();
6721 auto_vec
<tree
> dr_chain (group_size
);
6722 oprnds
.create (group_size
);
6724 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
6725 gcc_assert (alignment_support_scheme
);
6726 vec_loop_masks
*loop_masks
6727 = (loop_vinfo
&& LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
)
6728 ? &LOOP_VINFO_MASKS (loop_vinfo
)
6730 /* Targets with store-lane instructions must not require explicit
6731 realignment. vect_supportable_dr_alignment always returns either
6732 dr_aligned or dr_unaligned_supported for masked operations. */
6733 gcc_assert ((memory_access_type
!= VMAT_LOAD_STORE_LANES
6736 || alignment_support_scheme
== dr_aligned
6737 || alignment_support_scheme
== dr_unaligned_supported
);
6739 if (memory_access_type
== VMAT_CONTIGUOUS_DOWN
6740 || memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
6741 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
6744 tree vec_offset
= NULL_TREE
;
6745 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6747 aggr_type
= NULL_TREE
;
6750 else if (memory_access_type
== VMAT_GATHER_SCATTER
)
6752 aggr_type
= elem_type
;
6753 vect_get_strided_load_store_ops (stmt
, loop_vinfo
, &gs_info
,
6754 &bump
, &vec_offset
);
6758 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6759 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
6761 aggr_type
= vectype
;
6762 bump
= vect_get_data_ptr_increment (dr
, aggr_type
, memory_access_type
);
6766 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo
) = true;
6768 /* In case the vectorization factor (VF) is bigger than the number
6769 of elements that we can fit in a vectype (nunits), we have to generate
6770 more than one vector stmt - i.e - we need to "unroll" the
6771 vector stmt by a factor VF/nunits. For more details see documentation in
6772 vect_get_vec_def_for_copy_stmt. */
  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.  */
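  /* Illustrative note (not in the original comment): with group_size == 2 and
     8-element vectors, the first mask {0, 8, 1, 9, 2, 10, 3, 11} above picks
     elements alternately from the first input (indices 0-7) and the second
     input (indices 8-15), producing the low half of the interleaved data,
     and {4, 12, 5, 13, 6, 14, 7, 15} produces the high half; see
     vect_permute_store_chain for the general construction.  */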
6807 prev_stmt_info
= NULL
;
6808 tree vec_mask
= NULL_TREE
;
6809 for (j
= 0; j
< ncopies
; j
++)
6816 /* Get vectorized arguments for SLP_NODE. */
6817 vect_get_vec_defs (op
, NULL_TREE
, stmt
, &vec_oprnds
,
6820 vec_oprnd
= vec_oprnds
[0];
6824 /* For interleaved stores we collect vectorized defs for all the
6825 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
6826 used as an input to vect_permute_store_chain(), and OPRNDS as
6827 an input to vect_get_vec_def_for_stmt_copy() for the next copy.
6829 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6830 OPRNDS are of size 1. */
6831 next_stmt
= first_stmt
;
6832 for (i
= 0; i
< group_size
; i
++)
6834 /* Since gaps are not supported for interleaved stores,
6835 GROUP_SIZE is the exact number of stmts in the chain.
6836 Therefore, NEXT_STMT can't be NULL_TREE. In case that
6837 there is no interleaving, GROUP_SIZE is 1, and only one
6838 iteration of the loop will be executed. */
6839 op
= vect_get_store_rhs (next_stmt
);
6840 vec_oprnd
= vect_get_vec_def_for_operand (op
, next_stmt
);
6841 dr_chain
.quick_push (vec_oprnd
);
6842 oprnds
.quick_push (vec_oprnd
);
6843 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
6846 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
,
          /* We should have caught mismatched types earlier.  */
6851 gcc_assert (useless_type_conversion_p (vectype
,
6852 TREE_TYPE (vec_oprnd
)));
6853 bool simd_lane_access_p
6854 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
6855 if (simd_lane_access_p
6856 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
6857 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
6858 && integer_zerop (DR_OFFSET (first_dr
))
6859 && integer_zerop (DR_INIT (first_dr
))
6860 && alias_sets_conflict_p (get_alias_set (aggr_type
),
6861 get_alias_set (TREE_TYPE (ref_type
))))
6863 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
6864 dataref_offset
= build_int_cst (ref_type
, 0);
6867 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6869 vect_get_gather_scatter_ops (loop
, stmt
, &gs_info
,
6870 &dataref_ptr
, &vec_offset
);
6875 = vect_create_data_ref_ptr (first_stmt
, aggr_type
,
6876 simd_lane_access_p
? loop
: NULL
,
6877 offset
, &dummy
, gsi
, &ptr_incr
,
6878 simd_lane_access_p
, &inv_p
,
6880 gcc_assert (bb_vinfo
|| !inv_p
);
6884 /* For interleaved stores we created vectorized defs for all the
6885 defs stored in OPRNDS in the previous iteration (previous copy).
6886 DR_CHAIN is then used as an input to vect_permute_store_chain(),
6887 and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
6889 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
6890 OPRNDS are of size 1. */
6891 for (i
= 0; i
< group_size
; i
++)
6894 vect_is_simple_use (op
, vinfo
, &def_stmt
, &rhs_dt
);
6895 vec_oprnd
= vect_get_vec_def_for_stmt_copy (rhs_dt
, op
);
6896 dr_chain
[i
] = vec_oprnd
;
6897 oprnds
[i
] = vec_oprnd
;
6900 vec_mask
= vect_get_vec_def_for_stmt_copy (mask_dt
, vec_mask
);
6903 = int_const_binop (PLUS_EXPR
, dataref_offset
, bump
);
6904 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
6905 vec_offset
= vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
6908 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
6912 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
6916 /* Combine all the vectors into an array. */
6917 vec_array
= create_vector_array (vectype
, vec_num
);
6918 for (i
= 0; i
< vec_num
; i
++)
6920 vec_oprnd
= dr_chain
[i
];
6921 write_vector_array (stmt
, gsi
, vec_oprnd
, vec_array
, i
);
6924 tree final_mask
= NULL
;
6926 final_mask
= vect_get_loop_mask (gsi
, loop_masks
, ncopies
,
6929 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
6936 MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
6938 unsigned int align
= TYPE_ALIGN_UNIT (TREE_TYPE (vectype
));
6939 tree alias_ptr
= build_int_cst (ref_type
, align
);
6940 call
= gimple_build_call_internal (IFN_MASK_STORE_LANES
, 4,
6941 dataref_ptr
, alias_ptr
,
6942 final_mask
, vec_array
);
6947 MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY). */
6948 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
6949 call
= gimple_build_call_internal (IFN_STORE_LANES
, 1,
6951 gimple_call_set_lhs (call
, data_ref
);
6953 gimple_call_set_nothrow (call
, true);
6955 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
6963 result_chain
.create (group_size
);
6965 vect_permute_store_chain (dr_chain
, group_size
, stmt
, gsi
,
6969 next_stmt
= first_stmt
;
6970 for (i
= 0; i
< vec_num
; i
++)
6972 unsigned align
, misalign
;
6974 tree final_mask
= NULL_TREE
;
6976 final_mask
= vect_get_loop_mask (gsi
, loop_masks
,
6978 vectype
, vec_num
* j
+ i
);
6980 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
6983 if (memory_access_type
== VMAT_GATHER_SCATTER
)
6985 tree scale
= size_int (gs_info
.scale
);
6988 call
= gimple_build_call_internal
6989 (IFN_MASK_SCATTER_STORE
, 5, dataref_ptr
, vec_offset
,
6990 scale
, vec_oprnd
, final_mask
);
6992 call
= gimple_build_call_internal
6993 (IFN_SCATTER_STORE
, 4, dataref_ptr
, vec_offset
,
6995 gimple_call_set_nothrow (call
, true);
6997 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7002 /* Bump the vector pointer. */
7003 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
7007 vec_oprnd
= vec_oprnds
[i
];
7008 else if (grouped_store
)
7009 /* For grouped stores vectorized defs are interleaved in
7010 vect_permute_store_chain(). */
7011 vec_oprnd
= result_chain
[i
];
7013 align
= DR_TARGET_ALIGNMENT (first_dr
);
7014 if (aligned_access_p (first_dr
))
7016 else if (DR_MISALIGNMENT (first_dr
) == -1)
7018 align
= dr_alignment (vect_dr_behavior (first_dr
));
7022 misalign
= DR_MISALIGNMENT (first_dr
);
7023 if (dataref_offset
== NULL_TREE
7024 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
7025 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
7028 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7030 tree perm_mask
= perm_mask_for_reverse (vectype
);
7032 = vect_create_destination_var (vect_get_store_rhs (stmt
),
7034 tree new_temp
= make_ssa_name (perm_dest
);
7036 /* Generate the permute statement. */
7038 = gimple_build_assign (new_temp
, VEC_PERM_EXPR
, vec_oprnd
,
7039 vec_oprnd
, perm_mask
);
7040 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
7042 perm_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7043 vec_oprnd
= new_temp
;
7046 /* Arguments are ready. Create the new vector stmt. */
7049 align
= least_bit_hwi (misalign
| align
);
7050 tree ptr
= build_int_cst (ref_type
, align
);
7052 = gimple_build_call_internal (IFN_MASK_STORE
, 4,
7054 final_mask
, vec_oprnd
);
7055 gimple_call_set_nothrow (call
, true);
7060 data_ref
= fold_build2 (MEM_REF
, vectype
,
7064 : build_int_cst (ref_type
, 0));
7065 if (aligned_access_p (first_dr
))
7067 else if (DR_MISALIGNMENT (first_dr
) == -1)
7068 TREE_TYPE (data_ref
)
7069 = build_aligned_type (TREE_TYPE (data_ref
),
7070 align
* BITS_PER_UNIT
);
7072 TREE_TYPE (data_ref
)
7073 = build_aligned_type (TREE_TYPE (data_ref
),
7074 TYPE_ALIGN (elem_type
));
7075 vect_copy_ref_info (data_ref
, DR_REF (first_dr
));
7076 new_stmt
= gimple_build_assign (data_ref
, vec_oprnd
);
7078 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7083 next_stmt
= GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt
));
7091 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7093 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7094 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7099 result_chain
.release ();
7100 vec_oprnds
.release ();
7105 /* Given a vector type VECTYPE, turns permutation SEL into the equivalent
7106 VECTOR_CST mask. No checks are made that the target platform supports the
7107 mask, so callers may wish to test can_vec_perm_const_p separately, or use
7108 vect_gen_perm_mask_checked. */
7111 vect_gen_perm_mask_any (tree vectype
, const vec_perm_indices
&sel
)
7115 poly_uint64 nunits
= sel
.length ();
7116 gcc_assert (known_eq (nunits
, TYPE_VECTOR_SUBPARTS (vectype
)));
7118 mask_type
= build_vector_type (ssizetype
, nunits
);
7119 return vec_perm_indices_to_tree (mask_type
, sel
);
7122 /* Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p,
7123 i.e. that the target supports the pattern _for arbitrary input vectors_. */
7126 vect_gen_perm_mask_checked (tree vectype
, const vec_perm_indices
&sel
)
7128 gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype
), sel
));
7129 return vect_gen_perm_mask_any (vectype
, sel
);
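/* Usage sketch (illustrative only, mirroring what perm_mask_for_reverse does
   elsewhere in the vectorizer): build a mask that reverses VECTYPE and check
   target support before asking for the checked variant:

     poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
     vec_perm_builder sel (nunits, 1, 3);
     for (int i = 0; i < 3; ++i)
       sel.quick_push (nunits - 1 - i);
     vec_perm_indices indices (sel, 1, nunits);
     tree mask = NULL_TREE;
     if (can_vec_perm_const_p (TYPE_MODE (vectype), indices))
       mask = vect_gen_perm_mask_checked (vectype, indices);

   vect_gen_perm_mask_checked asserts can_vec_perm_const_p itself, so callers
   that cannot guarantee support must do the test first (or use
   vect_gen_perm_mask_any when a later check makes the permutation safe).  */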
7132 /* Given a vector variable X and Y, that was generated for the scalar
7133 STMT, generate instructions to permute the vector elements of X and Y
7134 using permutation mask MASK_VEC, insert them at *GSI and return the
7135 permuted vector variable. */
7138 permute_vec_elements (tree x
, tree y
, tree mask_vec
, gimple
*stmt
,
7139 gimple_stmt_iterator
*gsi
)
7141 tree vectype
= TREE_TYPE (x
);
7142 tree perm_dest
, data_ref
;
7145 tree scalar_dest
= gimple_get_lhs (stmt
);
7146 if (TREE_CODE (scalar_dest
) == SSA_NAME
)
7147 perm_dest
= vect_create_destination_var (scalar_dest
, vectype
);
7149 perm_dest
= vect_get_new_vect_var (vectype
, vect_simple_var
, NULL
);
7150 data_ref
= make_ssa_name (perm_dest
);
7152 /* Generate the permute statement. */
7153 perm_stmt
= gimple_build_assign (data_ref
, VEC_PERM_EXPR
, x
, y
, mask_vec
);
7154 vect_finish_stmt_generation (stmt
, perm_stmt
, gsi
);
7159 /* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can then be moved),
7162 otherwise returns false. */
7165 hoist_defs_of_uses (gimple
*stmt
, struct loop
*loop
)
7171 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
7173 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
7174 if (!gimple_nop_p (def_stmt
)
7175 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
          /* Make sure we don't need to recurse.  While we could do
             so in simple cases, when there are more complex use webs
             we don't have an easy way to preserve stmt order to fulfil
             dependencies within them.  */
7183 if (gimple_code (def_stmt
) == GIMPLE_PHI
)
7185 FOR_EACH_SSA_TREE_OPERAND (op2
, def_stmt
, i2
, SSA_OP_USE
)
7187 gimple
*def_stmt2
= SSA_NAME_DEF_STMT (op2
);
7188 if (!gimple_nop_p (def_stmt2
)
7189 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt2
)))
7199 FOR_EACH_SSA_TREE_OPERAND (op
, stmt
, i
, SSA_OP_USE
)
7201 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
7202 if (!gimple_nop_p (def_stmt
)
7203 && flow_bb_inside_loop_p (loop
, gimple_bb (def_stmt
)))
7205 gimple_stmt_iterator gsi
= gsi_for_stmt (def_stmt
);
7206 gsi_remove (&gsi
, false);
7207 gsi_insert_on_edge_immediate (loop_preheader_edge (loop
), def_stmt
);
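/* Illustrative example (assumed, not from the code above): for an invariant
   load such as

     loop:
       _1 = &a[n_5];      <-- defined inside the loop, but only from
       x_2 = *_1;             values that are invariant in the loop

   hoist_defs_of_uses verifies that every in-loop definition feeding the load
   (here the address computation _1) depends only on loop-invariant values,
   moves those definitions to the preheader edge, and thereby allows the
   caller to hoist the load itself.  */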
7214 /* vectorizable_load.
   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise. */
7223 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
7224 slp_tree slp_node
, slp_instance slp_node_instance
)
7227 tree vec_dest
= NULL
;
7228 tree data_ref
= NULL
;
7229 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
7230 stmt_vec_info prev_stmt_info
;
7231 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
7232 struct loop
*loop
= NULL
;
7233 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
7234 bool nested_in_vect_loop
= false;
7235 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
7239 gimple
*new_stmt
= NULL
;
7241 enum dr_alignment_support alignment_support_scheme
;
7242 tree dataref_ptr
= NULL_TREE
;
7243 tree dataref_offset
= NULL_TREE
;
7244 gimple
*ptr_incr
= NULL
;
7247 unsigned int group_size
;
7248 poly_uint64 group_gap_adj
;
7249 tree msq
= NULL_TREE
, lsq
;
7250 tree offset
= NULL_TREE
;
7251 tree byte_offset
= NULL_TREE
;
7252 tree realignment_token
= NULL_TREE
;
7254 vec
<tree
> dr_chain
= vNULL
;
7255 bool grouped_load
= false;
7257 gimple
*first_stmt_for_drptr
= NULL
;
7259 bool compute_in_loop
= false;
7260 struct loop
*at_loop
;
7262 bool slp
= (slp_node
!= NULL
);
7263 bool slp_perm
= false;
7264 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
7267 gather_scatter_info gs_info
;
7268 vec_info
*vinfo
= stmt_info
->vinfo
;
7270 enum vect_def_type mask_dt
= vect_unknown_def_type
;
7272 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
7275 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
7279 tree mask
= NULL_TREE
, mask_vectype
= NULL_TREE
;
7280 if (is_gimple_assign (stmt
))
7282 scalar_dest
= gimple_assign_lhs (stmt
);
7283 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
7286 tree_code code
= gimple_assign_rhs_code (stmt
);
7287 if (code
!= ARRAY_REF
7288 && code
!= BIT_FIELD_REF
7289 && code
!= INDIRECT_REF
7290 && code
!= COMPONENT_REF
7291 && code
!= IMAGPART_EXPR
7292 && code
!= REALPART_EXPR
7294 && TREE_CODE_CLASS (code
) != tcc_declaration
)
7299 gcall
*call
= dyn_cast
<gcall
*> (stmt
);
7300 if (!call
|| !gimple_call_internal_p (call
))
7303 internal_fn ifn
= gimple_call_internal_fn (call
);
7304 if (!internal_load_fn_p (ifn
))
7307 scalar_dest
= gimple_call_lhs (call
);
7311 if (slp_node
!= NULL
)
7313 if (dump_enabled_p ())
7314 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7315 "SLP of masked loads not supported.\n");
7319 int mask_index
= internal_fn_mask_index (ifn
);
7320 if (mask_index
>= 0)
7322 mask
= gimple_call_arg (call
, mask_index
);
7323 if (!vect_check_load_store_mask (stmt
, mask
, &mask_dt
,
7329 if (!STMT_VINFO_DATA_REF (stmt_info
))
7332 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
7333 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
7337 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
7338 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
7339 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
7344 /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);
7354 /* FORNOW. This restriction should be relaxed. */
7355 if (nested_in_vect_loop
&& ncopies
> 1)
7357 if (dump_enabled_p ())
7358 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7359 "multiple types in nested loop.\n");
7363 /* Invalidate assumptions made by dependence analysis when vectorization
7364 on the unrolled body effectively re-orders stmts. */
7366 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
7367 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo
),
7368 STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
7370 if (dump_enabled_p ())
7371 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7372 "cannot perform implicit CSE when unrolling "
7373 "with negative dependence distance\n");
7377 elem_type
= TREE_TYPE (vectype
);
7378 mode
= TYPE_MODE (vectype
);
7380 /* FORNOW. In some cases can vectorize even if data-type not supported
7381 (e.g. - data copies). */
7382 if (optab_handler (mov_optab
, mode
) == CODE_FOR_nothing
)
7384 if (dump_enabled_p ())
7385 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7386 "Aligned load, but unsupported type.\n");
7390 /* Check if the load is a part of an interleaving chain. */
7391 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
7393 grouped_load
= true;
7395 gcc_assert (!nested_in_vect_loop
);
7396 gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info
));
7398 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7399 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7401 if (slp
&& SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
7404 /* Invalidate assumptions made by dependence analysis when vectorization
7405 on the unrolled body effectively re-orders stmts. */
7406 if (!PURE_SLP_STMT (stmt_info
)
7407 && STMT_VINFO_MIN_NEG_DIST (stmt_info
) != 0
7408 && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo
),
7409 STMT_VINFO_MIN_NEG_DIST (stmt_info
)))
7411 if (dump_enabled_p ())
7412 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7413 "cannot perform implicit CSE when performing "
7414 "group loads with negative dependence distance\n");
7418 /* Similarly when the stmt is a load that is both part of a SLP
7419 instance and a loop vectorized stmt via the same-dr mechanism
7420 we have to give up. */
7421 if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)
7422 && (STMT_SLP_TYPE (stmt_info
)
7423 != STMT_SLP_TYPE (vinfo_for_stmt
7424 (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info
)))))
7426 if (dump_enabled_p ())
7427 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7428 "conflicting SLP types for CSEd load\n");
7435 vect_memory_access_type memory_access_type
;
7436 if (!get_load_store_type (stmt
, vectype
, slp
, mask
, VLS_LOAD
, ncopies
,
7437 &memory_access_type
, &gs_info
))
7442 if (memory_access_type
== VMAT_CONTIGUOUS
)
7444 machine_mode vec_mode
= TYPE_MODE (vectype
);
7445 if (!VECTOR_MODE_P (vec_mode
)
7446 || !can_vec_mask_load_store_p (vec_mode
,
7447 TYPE_MODE (mask_vectype
), true))
7450 else if (memory_access_type
== VMAT_GATHER_SCATTER
&& gs_info
.decl
)
7452 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
.decl
));
7454 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
7455 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
7457 if (dump_enabled_p ())
7458 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7459 "masked gather with integer mask not"
7464 else if (memory_access_type
!= VMAT_LOAD_STORE_LANES
7465 && memory_access_type
!= VMAT_GATHER_SCATTER
)
7467 if (dump_enabled_p ())
7468 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
7469 "unsupported access type for masked load.\n");
7474 if (!vec_stmt
) /* transformation not required. */
7477 STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
) = memory_access_type
;
7480 && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo
))
7481 check_load_store_masking (loop_vinfo
, vectype
, VLS_LOAD
, group_size
,
7482 memory_access_type
, &gs_info
);
7484 STMT_VINFO_TYPE (stmt_info
) = load_vec_info_type
;
7485 /* The SLP costs are calculated during SLP analysis. */
7487 vect_model_load_cost (stmt_info
, ncopies
, memory_access_type
,
7493 gcc_assert (memory_access_type
7494 == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info
));
7496 if (dump_enabled_p ())
7497 dump_printf_loc (MSG_NOTE
, vect_location
,
7498 "transform load. ncopies = %d\n", ncopies
);
7502 ensure_base_align (dr
);
7504 if (memory_access_type
== VMAT_GATHER_SCATTER
&& gs_info
.decl
)
7506 vect_build_gather_load_calls (stmt
, gsi
, vec_stmt
, &gs_info
, mask
,
7511 if (memory_access_type
== VMAT_ELEMENTWISE
7512 || memory_access_type
== VMAT_STRIDED_SLP
)
7514 gimple_stmt_iterator incr_gsi
;
7520 vec
<constructor_elt
, va_gc
> *v
= NULL
;
7521 tree stride_base
, stride_step
, alias_off
;
7522 /* Checked by get_load_store_type. */
7523 unsigned int const_nunits
= nunits
.to_constant ();
7524 unsigned HOST_WIDE_INT cst_offset
= 0;
7526 gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
));
7527 gcc_assert (!nested_in_vect_loop
);
7531 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7532 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7539 if (slp
&& grouped_load
)
7541 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7542 ref_type
= get_group_alias_ptr_type (first_stmt
);
7548 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)))
7549 * vect_get_place_in_interleaving_chain (stmt
, first_stmt
));
7551 ref_type
= reference_alias_ptr_type (DR_REF (dr
));
7555 = fold_build_pointer_plus
7556 (DR_BASE_ADDRESS (first_dr
),
7557 size_binop (PLUS_EXPR
,
7558 convert_to_ptrofftype (DR_OFFSET (first_dr
)),
7559 convert_to_ptrofftype (DR_INIT (first_dr
))));
7560 stride_step
= fold_convert (sizetype
, DR_STEP (first_dr
));
      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
             ...  */
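      /* As a concrete illustration (assumed values, not from the original
         comment): with V4SI vectors and a run-time stride s, each copy emits
         nloads = 4 element loads

           tmp0 = array[j + 0*s];
           tmp1 = array[j + 1*s];
           tmp2 = array[j + 2*s];
           tmp3 = array[j + 3*s];

         and combines them through a CONSTRUCTOR {tmp0, tmp1, tmp2, tmp3} that
         vect_init_vector materializes as the vector value; for
         VMAT_STRIDED_SLP the pieces may instead be sub-vectors or same-sized
         integers, as the ltype/lvectype selection below decides.  */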
7578 ivstep
= fold_build2 (MULT_EXPR
, TREE_TYPE (stride_step
), stride_step
,
7579 build_int_cst (TREE_TYPE (stride_step
), vf
));
7581 standard_iv_increment_position (loop
, &incr_gsi
, &insert_after
);
7583 stride_base
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_base
);
7584 ivstep
= cse_and_gimplify_to_preheader (loop_vinfo
, ivstep
);
7585 create_iv (stride_base
, ivstep
, NULL
,
7586 loop
, &incr_gsi
, insert_after
,
7588 incr
= gsi_stmt (incr_gsi
);
7589 set_vinfo_for_stmt (incr
, new_stmt_vec_info (incr
, loop_vinfo
));
7591 stride_step
= cse_and_gimplify_to_preheader (loop_vinfo
, stride_step
);
7593 prev_stmt_info
= NULL
;
7594 running_off
= offvar
;
7595 alias_off
= build_int_cst (ref_type
, 0);
7596 int nloads
= const_nunits
;
7598 tree ltype
= TREE_TYPE (vectype
);
7599 tree lvectype
= vectype
;
7600 auto_vec
<tree
> dr_chain
;
7601 if (memory_access_type
== VMAT_STRIDED_SLP
)
7603 if (group_size
< const_nunits
)
7605 /* First check if vec_init optab supports construction from
7606 vector elts directly. */
7607 scalar_mode elmode
= SCALAR_TYPE_MODE (TREE_TYPE (vectype
));
7609 if (mode_for_vector (elmode
, group_size
).exists (&vmode
)
7610 && VECTOR_MODE_P (vmode
)
7611 && targetm
.vector_mode_supported_p (vmode
)
7612 && (convert_optab_handler (vec_init_optab
,
7613 TYPE_MODE (vectype
), vmode
)
7614 != CODE_FOR_nothing
))
7616 nloads
= const_nunits
/ group_size
;
7618 ltype
= build_vector_type (TREE_TYPE (vectype
), group_size
);
7622 /* Otherwise avoid emitting a constructor of vector elements
7623 by performing the loads using an integer type of the same
7624 size, constructing a vector of those and then
7625 re-interpreting it as the original vector type.
7626 This avoids a huge runtime penalty due to the general
7627 inability to perform store forwarding from smaller stores
7628 to a larger load. */
7630 = group_size
* TYPE_PRECISION (TREE_TYPE (vectype
));
7631 elmode
= int_mode_for_size (lsize
, 0).require ();
7632 unsigned int lnunits
= const_nunits
/ group_size
;
7633 /* If we can't construct such a vector fall back to
7634 element loads of the original vector type. */
7635 if (mode_for_vector (elmode
, lnunits
).exists (&vmode
)
7636 && VECTOR_MODE_P (vmode
)
7637 && targetm
.vector_mode_supported_p (vmode
)
7638 && (convert_optab_handler (vec_init_optab
, vmode
, elmode
)
7639 != CODE_FOR_nothing
))
7643 ltype
= build_nonstandard_integer_type (lsize
, 1);
7644 lvectype
= build_vector_type (ltype
, nloads
);
7651 lnel
= const_nunits
;
7654 ltype
= build_aligned_type (ltype
, TYPE_ALIGN (TREE_TYPE (vectype
)));
7658 /* For SLP permutation support we need to load the whole group,
7659 not only the number of vector stmts the permutation result
7663 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7665 unsigned int const_vf
= vf
.to_constant ();
7666 ncopies
= CEIL (group_size
* const_vf
, const_nunits
);
7667 dr_chain
.create (ncopies
);
7670 ncopies
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7672 unsigned int group_el
= 0;
7673 unsigned HOST_WIDE_INT
7674 elsz
= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype
)));
7675 for (j
= 0; j
< ncopies
; j
++)
7678 vec_alloc (v
, nloads
);
7679 for (i
= 0; i
< nloads
; i
++)
7681 tree this_off
= build_int_cst (TREE_TYPE (alias_off
),
7682 group_el
* elsz
+ cst_offset
);
7683 tree data_ref
= build2 (MEM_REF
, ltype
, running_off
, this_off
);
7684 vect_copy_ref_info (data_ref
, DR_REF (first_dr
));
7685 new_stmt
= gimple_build_assign (make_ssa_name (ltype
), data_ref
);
7686 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7688 CONSTRUCTOR_APPEND_ELT (v
, NULL_TREE
,
7689 gimple_assign_lhs (new_stmt
));
7693 || group_el
== group_size
)
7695 tree newoff
= copy_ssa_name (running_off
);
7696 gimple
*incr
= gimple_build_assign (newoff
, POINTER_PLUS_EXPR
,
7697 running_off
, stride_step
);
7698 vect_finish_stmt_generation (stmt
, incr
, gsi
);
7700 running_off
= newoff
;
7706 tree vec_inv
= build_constructor (lvectype
, v
);
7707 new_temp
= vect_init_vector (stmt
, vec_inv
, lvectype
, gsi
);
7708 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
7709 if (lvectype
!= vectype
)
7711 new_stmt
= gimple_build_assign (make_ssa_name (vectype
),
7713 build1 (VIEW_CONVERT_EXPR
,
7714 vectype
, new_temp
));
7715 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
7722 dr_chain
.quick_push (gimple_assign_lhs (new_stmt
));
7724 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
7729 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
7731 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
7732 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
7738 vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
7739 slp_node_instance
, false, &n_perms
);
7744 if (memory_access_type
== VMAT_GATHER_SCATTER
7745 || (!slp
&& memory_access_type
== VMAT_CONTIGUOUS
))
7746 grouped_load
= false;
7750 first_stmt
= GROUP_FIRST_ELEMENT (stmt_info
);
7751 group_size
= GROUP_SIZE (vinfo_for_stmt (first_stmt
));
7752 /* For SLP vectorization we directly vectorize a subchain
7753 without permutation. */
7754 if (slp
&& ! SLP_TREE_LOAD_PERMUTATION (slp_node
).exists ())
7755 first_stmt
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7756 /* For BB vectorization always use the first stmt to base
7757 the data ref pointer on. */
7759 first_stmt_for_drptr
= SLP_TREE_SCALAR_STMTS (slp_node
)[0];
7761 /* Check if the chain of loads is already vectorized. */
7762 if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt
))
7763 /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
7764 ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
7768 in multiple different permutations (having multiple
7769 slp nodes which refer to the same group) the CSE
7770 is even wrong code. See PR56270. */
7773 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
7776 first_dr
= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt
));
7779 /* VEC_NUM is the number of vect stmts to be created for this group. */
7782 grouped_load
= false;
7783 /* For SLP permutation support we need to load the whole group,
7784 not only the number of vector stmts the permutation result
7788 /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
7790 unsigned int const_vf
= vf
.to_constant ();
7791 unsigned int const_nunits
= nunits
.to_constant ();
7792 vec_num
= CEIL (group_size
* const_vf
, const_nunits
);
7793 group_gap_adj
= vf
* group_size
- nunits
* vec_num
;
7797 vec_num
= SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
);
7799 = group_size
- SLP_INSTANCE_GROUP_SIZE (slp_node_instance
);
7803 vec_num
= group_size
;
7805 ref_type
= get_group_alias_ptr_type (first_stmt
);
7811 group_size
= vec_num
= 1;
7813 ref_type
= reference_alias_ptr_type (DR_REF (first_dr
));
7816 alignment_support_scheme
= vect_supportable_dr_alignment (first_dr
, false);
7817 gcc_assert (alignment_support_scheme
);
7818 vec_loop_masks
*loop_masks
7819 = (loop_vinfo
&& LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
)
7820 ? &LOOP_VINFO_MASKS (loop_vinfo
)
7822 /* Targets with store-lane instructions must not require explicit
7823 realignment. vect_supportable_dr_alignment always returns either
7824 dr_aligned or dr_unaligned_supported for masked operations. */
7825 gcc_assert ((memory_access_type
!= VMAT_LOAD_STORE_LANES
7828 || alignment_support_scheme
== dr_aligned
7829 || alignment_support_scheme
== dr_unaligned_supported
);
7831 /* In case the vectorization factor (VF) is bigger than the number
7832 of elements that we can fit in a vectype (nunits), we have to generate
7833 more than one vector stmt - i.e - we need to "unroll" the
7834 vector stmt by a factor VF/nunits. In doing so, we record a pointer
7835 from one copy of the vector stmt to the next, in the field
7836 STMT_VINFO_RELATED_STMT. This is necessary in order to allow following
7837 stages to find the correct vector defs to be used when vectorizing
7838 stmts that use the defs of the current stmt. The example below
7839 illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
7840 need to create 4 vectorized stmts):
     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
7848 We first create the vector stmt VS1_0, and, as usual, record a
7849 pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
7850 Next, we create the vector stmt VS1_1, and record a pointer to
7851 it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information recorded in the RELATED_STMT field is used to vectorize
     stmt S2.  */
  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
     ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
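  /* Illustrative note (not in the original comment): the extract-even mask
     { 0, 2, ..., i*2 } above selects elements 0, 2, 4, ... from the
     concatenation of vx0 and vx1, and the extract-odd mask { 1, 3, ..., i*2+1 }
     selects elements 1, 3, 5, ..., which is how a group of two interleaved
     scalar accesses is split back into per-stmt vectors; see
     vect_permute_load_chain for the general case.  */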
7899 /* If the data reference is aligned (dr_aligned) or potentially unaligned
7900 on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */
7928 /* If the misalignment remains the same throughout the execution of the
7929 loop, we can create the init_addr and permutation mask at the loop
7930 preheader. Otherwise, it needs to be created inside the loop.
7931 This can only occur when vectorizing memory accesses in the inner-loop
7932 nested within an outer-loop that is being vectorized. */
7934 if (nested_in_vect_loop
7935 && !multiple_p (DR_STEP_ALIGNMENT (dr
),
7936 GET_MODE_SIZE (TYPE_MODE (vectype
))))
7938 gcc_assert (alignment_support_scheme
!= dr_explicit_realign_optimized
);
7939 compute_in_loop
= true;
7942 if ((alignment_support_scheme
== dr_explicit_realign_optimized
7943 || alignment_support_scheme
== dr_explicit_realign
)
7944 && !compute_in_loop
)
7946 msq
= vect_setup_realignment (first_stmt
, gsi
, &realignment_token
,
7947 alignment_support_scheme
, NULL_TREE
,
7949 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
7951 phi
= as_a
<gphi
*> (SSA_NAME_DEF_STMT (msq
));
7952 byte_offset
= size_binop (MINUS_EXPR
, TYPE_SIZE_UNIT (vectype
),
7959 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
7960 offset
= size_int (-TYPE_VECTOR_SUBPARTS (vectype
) + 1);
7963 tree vec_offset
= NULL_TREE
;
7964 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
7966 aggr_type
= NULL_TREE
;
7969 else if (memory_access_type
== VMAT_GATHER_SCATTER
)
7971 aggr_type
= elem_type
;
7972 vect_get_strided_load_store_ops (stmt
, loop_vinfo
, &gs_info
,
7973 &bump
, &vec_offset
);
7977 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
7978 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
7980 aggr_type
= vectype
;
7981 bump
= vect_get_data_ptr_increment (dr
, aggr_type
, memory_access_type
);
7984 tree vec_mask
= NULL_TREE
;
7985 prev_stmt_info
= NULL
;
7986 poly_uint64 group_elt
= 0;
7987 for (j
= 0; j
< ncopies
; j
++)
7989 /* 1. Create the vector or array pointer update chain. */
7992 bool simd_lane_access_p
7993 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
7994 if (simd_lane_access_p
7995 && TREE_CODE (DR_BASE_ADDRESS (first_dr
)) == ADDR_EXPR
7996 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr
), 0))
7997 && integer_zerop (DR_OFFSET (first_dr
))
7998 && integer_zerop (DR_INIT (first_dr
))
7999 && alias_sets_conflict_p (get_alias_set (aggr_type
),
8000 get_alias_set (TREE_TYPE (ref_type
)))
8001 && (alignment_support_scheme
== dr_aligned
8002 || alignment_support_scheme
== dr_unaligned_supported
))
8004 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr
));
8005 dataref_offset
= build_int_cst (ref_type
, 0);
8008 else if (first_stmt_for_drptr
8009 && first_stmt
!= first_stmt_for_drptr
)
8012 = vect_create_data_ref_ptr (first_stmt_for_drptr
, aggr_type
,
8013 at_loop
, offset
, &dummy
, gsi
,
8014 &ptr_incr
, simd_lane_access_p
,
8015 &inv_p
, byte_offset
, bump
);
8016 /* Adjust the pointer by the difference to first_stmt. */
8017 data_reference_p ptrdr
8018 = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr
));
8019 tree diff
= fold_convert (sizetype
,
8020 size_binop (MINUS_EXPR
,
8023 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8026 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
8028 vect_get_gather_scatter_ops (loop
, stmt
, &gs_info
,
8029 &dataref_ptr
, &vec_offset
);
8034 = vect_create_data_ref_ptr (first_stmt
, aggr_type
, at_loop
,
8035 offset
, &dummy
, gsi
, &ptr_incr
,
8036 simd_lane_access_p
, &inv_p
,
8039 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
,
8045 dataref_offset
= int_const_binop (PLUS_EXPR
, dataref_offset
,
8047 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
8048 vec_offset
= vect_get_vec_def_for_stmt_copy (gs_info
.offset_dt
,
8051 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8054 vec_mask
= vect_get_vec_def_for_stmt_copy (mask_dt
, vec_mask
);
8057 if (grouped_load
|| slp_perm
)
8058 dr_chain
.create (vec_num
);
8060 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
8064 vec_array
= create_vector_array (vectype
, vec_num
);
8066 tree final_mask
= NULL_TREE
;
8068 final_mask
= vect_get_loop_mask (gsi
, loop_masks
, ncopies
,
8071 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
8078 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8080 unsigned int align
= TYPE_ALIGN_UNIT (TREE_TYPE (vectype
));
8081 tree alias_ptr
= build_int_cst (ref_type
, align
);
8082 call
= gimple_build_call_internal (IFN_MASK_LOAD_LANES
, 3,
8083 dataref_ptr
, alias_ptr
,
8089 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8090 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
8091 call
= gimple_build_call_internal (IFN_LOAD_LANES
, 1, data_ref
);
8093 gimple_call_set_lhs (call
, vec_array
);
8094 gimple_call_set_nothrow (call
, true);
8096 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
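	  /* Illustrative sketch only (hypothetical names, not GCC API):
	     LOAD_LANES reads VEC_NUM * NUNITS contiguous scalars and
	     de-interleaves them into VEC_NUM vectors, one per member of the
	     interleaved group, e.g. for a 2-way group of 4-lane int vectors.
	     Kept out of the build.  */
#if 0
static void
load_lanes_sketch (const int *p, int lane0[4], int lane1[4])
{
  for (int i = 0; i < 4; i++)
    {
      lane0[i] = p[2 * i];	/* group elements 0, 2, 4, 6.  */
      lane1[i] = p[2 * i + 1];	/* group elements 1, 3, 5, 7.  */
    }
}
#endif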
	  /* Extract each vector into an SSA_NAME.  */
	  for (i = 0; i < vec_num; i++)
	    {
	      new_temp = read_vector_array (stmt, gsi, scalar_dest,
					    vec_array, i);
	      dr_chain.quick_push (new_temp);
	    }

	  /* Record the mapping between SSA_NAMEs and statements.  */
	  vect_record_grouped_load_vectors (stmt, dr_chain);
8111 for (i
= 0; i
< vec_num
; i
++)
8113 tree final_mask
= NULL_TREE
;
8115 && memory_access_type
!= VMAT_INVARIANT
)
8116 final_mask
= vect_get_loop_mask (gsi
, loop_masks
,
8118 vectype
, vec_num
* j
+ i
);
8120 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
8124 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8127 /* 2. Create the vector-load in the loop. */
8128 switch (alignment_support_scheme
)
8131 case dr_unaligned_supported
:
8133 unsigned int align
, misalign
;
8135 if (memory_access_type
== VMAT_GATHER_SCATTER
)
8137 tree scale
= size_int (gs_info
.scale
);
8140 call
= gimple_build_call_internal
8141 (IFN_MASK_GATHER_LOAD
, 4, dataref_ptr
,
8142 vec_offset
, scale
, final_mask
);
8144 call
= gimple_build_call_internal
8145 (IFN_GATHER_LOAD
, 3, dataref_ptr
,
8147 gimple_call_set_nothrow (call
, true);
8149 data_ref
= NULL_TREE
;
8153 align
= DR_TARGET_ALIGNMENT (dr
);
8154 if (alignment_support_scheme
== dr_aligned
)
8156 gcc_assert (aligned_access_p (first_dr
));
8159 else if (DR_MISALIGNMENT (first_dr
) == -1)
8161 align
= dr_alignment (vect_dr_behavior (first_dr
));
8165 misalign
= DR_MISALIGNMENT (first_dr
);
8166 if (dataref_offset
== NULL_TREE
8167 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
8168 set_ptr_info_alignment (get_ptr_info (dataref_ptr
),
8173 align
= least_bit_hwi (misalign
| align
);
8174 tree ptr
= build_int_cst (ref_type
, align
);
8176 = gimple_build_call_internal (IFN_MASK_LOAD
, 3,
8179 gimple_call_set_nothrow (call
, true);
8181 data_ref
= NULL_TREE
;
8186 = fold_build2 (MEM_REF
, vectype
, dataref_ptr
,
8189 : build_int_cst (ref_type
, 0));
8190 if (alignment_support_scheme
== dr_aligned
)
8192 else if (DR_MISALIGNMENT (first_dr
) == -1)
8193 TREE_TYPE (data_ref
)
8194 = build_aligned_type (TREE_TYPE (data_ref
),
8195 align
* BITS_PER_UNIT
);
8197 TREE_TYPE (data_ref
)
8198 = build_aligned_type (TREE_TYPE (data_ref
),
8199 TYPE_ALIGN (elem_type
));
8203 case dr_explicit_realign
:
8207 tree vs
= size_int (TYPE_VECTOR_SUBPARTS (vectype
));
8209 if (compute_in_loop
)
8210 msq
= vect_setup_realignment (first_stmt
, gsi
,
8212 dr_explicit_realign
,
8215 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
8216 ptr
= copy_ssa_name (dataref_ptr
);
8218 ptr
= make_ssa_name (TREE_TYPE (dataref_ptr
));
8219 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
8220 new_stmt
= gimple_build_assign
8221 (ptr
, BIT_AND_EXPR
, dataref_ptr
,
8223 (TREE_TYPE (dataref_ptr
),
8224 -(HOST_WIDE_INT
) align
));
8225 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8227 = build2 (MEM_REF
, vectype
, ptr
,
8228 build_int_cst (ref_type
, 0));
8229 vect_copy_ref_info (data_ref
, DR_REF (first_dr
));
8230 vec_dest
= vect_create_destination_var (scalar_dest
,
8232 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
8233 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
8234 gimple_assign_set_lhs (new_stmt
, new_temp
);
8235 gimple_set_vdef (new_stmt
, gimple_vdef (stmt
));
8236 gimple_set_vuse (new_stmt
, gimple_vuse (stmt
));
8237 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8240 bump
= size_binop (MULT_EXPR
, vs
,
8241 TYPE_SIZE_UNIT (elem_type
));
8242 bump
= size_binop (MINUS_EXPR
, bump
, size_one_node
);
8243 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
, stmt
, bump
);
8244 new_stmt
= gimple_build_assign
8245 (NULL_TREE
, BIT_AND_EXPR
, ptr
,
8247 (TREE_TYPE (ptr
), -(HOST_WIDE_INT
) align
));
8248 ptr
= copy_ssa_name (ptr
, new_stmt
);
8249 gimple_assign_set_lhs (new_stmt
, ptr
);
8250 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8252 = build2 (MEM_REF
, vectype
, ptr
,
8253 build_int_cst (ref_type
, 0));
8256 case dr_explicit_realign_optimized
:
8258 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
8259 new_temp
= copy_ssa_name (dataref_ptr
);
8261 new_temp
= make_ssa_name (TREE_TYPE (dataref_ptr
));
8262 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr
);
8263 new_stmt
= gimple_build_assign
8264 (new_temp
, BIT_AND_EXPR
, dataref_ptr
,
8265 build_int_cst (TREE_TYPE (dataref_ptr
),
8266 -(HOST_WIDE_INT
) align
));
8267 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8269 = build2 (MEM_REF
, vectype
, new_temp
,
8270 build_int_cst (ref_type
, 0));
	  vec_dest = vect_create_destination_var (scalar_dest, vectype);
	  /* DATA_REF is null if we've already built the statement.  */
	  if (data_ref)
	    {
	      vect_copy_ref_info (data_ref, DR_REF (first_dr));
	      new_stmt = gimple_build_assign (vec_dest, data_ref);
	    }
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  /* 3. Handle explicit realignment if necessary/supported.
	     Create in loop:
	       vec_dest = realign_load (msq, lsq, realignment_token)  */
	  if (alignment_support_scheme == dr_explicit_realign_optimized
	      || alignment_support_scheme == dr_explicit_realign)
	    {
	      lsq = gimple_assign_lhs (new_stmt);
	      if (!realignment_token)
		realignment_token = dataref_ptr;
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
					      msq, lsq, realignment_token);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (alignment_support_scheme == dr_explicit_realign_optimized)
		{
		  if (i == vec_num - 1 && j == ncopies - 1)
		    add_phi_arg (phi, lsq,
				 loop_latch_edge (containing_loop),
				 UNKNOWN_LOCATION);
		  msq = lsq;
		}
	    }
	  /* 4. Handle invariant-load.  */
	  if (inv_p && !bb_vinfo)
	    {
	      gcc_assert (!grouped_load);
	      /* If we have versioned for aliasing or the loop doesn't
		 have any data dependencies that would preclude this,
		 then we are sure this is a loop invariant load and
		 thus we can insert it on the preheader edge.  */
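	      /* Illustrative sketch only (hypothetical names, not GCC API):
		 what hoisting an invariant load to the preheader and
		 splatting it amounts to in plain C.  Kept out of the
		 build.  */
#if 0
static void
hoisted_invariant_load_sketch (int *a, const int *p, int n)
{
  int tem = *p;				/* hoisted to the loop preheader.  */
  int vec[4] = { tem, tem, tem, tem };	/* vect_init_vector equivalent.  */
  for (int i = 0; i + 4 <= n; i += 4)	/* vectorized body (no epilogue).  */
    for (int lane = 0; lane < 4; lane++)
      a[i + lane] = vec[lane];
}
#endif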
8322 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo
)
8323 && !nested_in_vect_loop
8324 && hoist_defs_of_uses (stmt
, loop
))
8326 if (dump_enabled_p ())
8328 dump_printf_loc (MSG_NOTE
, vect_location
,
8329 "hoisting out of the vectorized "
8331 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
8333 tree tem
= copy_ssa_name (scalar_dest
);
8334 gsi_insert_on_edge_immediate
8335 (loop_preheader_edge (loop
),
8336 gimple_build_assign (tem
,
8338 (gimple_assign_rhs1 (stmt
))));
8339 new_temp
= vect_init_vector (stmt
, tem
, vectype
, NULL
);
8340 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
8341 set_vinfo_for_stmt (new_stmt
,
8342 new_stmt_vec_info (new_stmt
, vinfo
));
8346 gimple_stmt_iterator gsi2
= *gsi
;
8348 new_temp
= vect_init_vector (stmt
, scalar_dest
,
8350 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
8354 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
8356 tree perm_mask
= perm_mask_for_reverse (vectype
);
8357 new_temp
= permute_vec_elements (new_temp
, new_temp
,
8358 perm_mask
, stmt
, gsi
);
8359 new_stmt
= SSA_NAME_DEF_STMT (new_temp
);
8362 /* Collect vector loads and later create their permutation in
8363 vect_transform_grouped_load (). */
8364 if (grouped_load
|| slp_perm
)
8365 dr_chain
.quick_push (new_temp
);
8367 /* Store vector loads in the corresponding SLP_NODE. */
8368 if (slp
&& !slp_perm
)
8369 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
	  /* With an SLP permutation we load the gaps as well; without one
	     we need to skip the gaps after we manage to fully load
	     all elements.  group_gap_adj is GROUP_SIZE here.  */
8374 group_elt
+= nunits
;
8375 if (maybe_ne (group_gap_adj
, 0U)
8377 && known_eq (group_elt
, group_size
- group_gap_adj
))
8379 poly_wide_int bump_val
8380 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
8382 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
8383 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8388 /* Bump the vector pointer to account for a gap or for excess
8389 elements loaded for a permuted SLP load. */
8390 if (maybe_ne (group_gap_adj
, 0U) && slp_perm
)
8392 poly_wide_int bump_val
8393 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
8395 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
8396 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8401 if (slp
&& !slp_perm
)
8407 if (!vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
8408 slp_node_instance
, false,
8411 dr_chain
.release ();
8419 if (memory_access_type
!= VMAT_LOAD_STORE_LANES
)
8420 vect_transform_grouped_load (stmt
, dr_chain
, group_size
, gsi
);
8421 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
8426 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
8428 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
8429 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
8432 dr_chain
.release ();
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */
8452 vect_is_simple_cond (tree cond
, vec_info
*vinfo
,
8453 tree
*comp_vectype
, enum vect_def_type
*dts
,
8457 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8460 if (TREE_CODE (cond
) == SSA_NAME
8461 && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond
)))
8463 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (cond
);
8464 if (!vect_is_simple_use (cond
, vinfo
, &lhs_def_stmt
,
8465 &dts
[0], comp_vectype
)
8467 || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype
))
8472 if (!COMPARISON_CLASS_P (cond
))
8475 lhs
= TREE_OPERAND (cond
, 0);
8476 rhs
= TREE_OPERAND (cond
, 1);
8478 if (TREE_CODE (lhs
) == SSA_NAME
)
8480 gimple
*lhs_def_stmt
= SSA_NAME_DEF_STMT (lhs
);
8481 if (!vect_is_simple_use (lhs
, vinfo
, &lhs_def_stmt
, &dts
[0], &vectype1
))
8484 else if (TREE_CODE (lhs
) == INTEGER_CST
|| TREE_CODE (lhs
) == REAL_CST
8485 || TREE_CODE (lhs
) == FIXED_CST
)
8486 dts
[0] = vect_constant_def
;
8490 if (TREE_CODE (rhs
) == SSA_NAME
)
8492 gimple
*rhs_def_stmt
= SSA_NAME_DEF_STMT (rhs
);
8493 if (!vect_is_simple_use (rhs
, vinfo
, &rhs_def_stmt
, &dts
[1], &vectype2
))
8496 else if (TREE_CODE (rhs
) == INTEGER_CST
|| TREE_CODE (rhs
) == REAL_CST
8497 || TREE_CODE (rhs
) == FIXED_CST
)
8498 dts
[1] = vect_constant_def
;
8502 if (vectype1
&& vectype2
8503 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1
),
8504 TYPE_VECTOR_SUBPARTS (vectype2
)))
8507 *comp_vectype
= vectype1
? vectype1
: vectype2
;
8508 /* Invariant comparison. */
8509 if (! *comp_vectype
)
8511 tree scalar_type
= TREE_TYPE (lhs
);
8512 /* If we can widen the comparison to match vectype do so. */
8513 if (INTEGRAL_TYPE_P (scalar_type
)
8514 && tree_int_cst_lt (TYPE_SIZE (scalar_type
),
8515 TYPE_SIZE (TREE_TYPE (vectype
))))
8516 scalar_type
= build_nonstandard_integer_type
8517 (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype
))),
8518 TYPE_UNSIGNED (scalar_type
));
8519 *comp_vectype
= get_vectype_for_scalar_type (scalar_type
);
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
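/* Illustrative sketch only (hypothetical names, not GCC API): a scalar
   COND_EXPR such as  x = a < b ? c : d  becomes a vector comparison that
   produces a mask plus a VEC_COND_EXPR, i.e. an element-wise blend, shown
   here lane by lane in plain C.  Kept out of the build.  */
#if 0
static void
vec_cond_sketch (const int *a, const int *b, const int *c, const int *d,
		 int *x)
{
  for (int lane = 0; lane < 4; lane++)
    {
      int mask = a[lane] < b[lane] ? -1 : 0;		/* vector comparison.  */
      x[lane] = (mask & c[lane]) | (~mask & d[lane]);	/* VEC_COND_EXPR.  */
    }
}
#endif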
8539 vectorizable_condition (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
8540 gimple
**vec_stmt
, tree reduc_def
, int reduc_index
,
8543 tree scalar_dest
= NULL_TREE
;
8544 tree vec_dest
= NULL_TREE
;
8545 tree cond_expr
, cond_expr0
= NULL_TREE
, cond_expr1
= NULL_TREE
;
8546 tree then_clause
, else_clause
;
8547 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
8548 tree comp_vectype
= NULL_TREE
;
8549 tree vec_cond_lhs
= NULL_TREE
, vec_cond_rhs
= NULL_TREE
;
8550 tree vec_then_clause
= NULL_TREE
, vec_else_clause
= NULL_TREE
;
8553 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
8554 enum vect_def_type dts
[4]
8555 = {vect_unknown_def_type
, vect_unknown_def_type
,
8556 vect_unknown_def_type
, vect_unknown_def_type
};
8559 enum tree_code code
, cond_code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
8560 stmt_vec_info prev_stmt_info
= NULL
;
8562 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
8563 vec
<tree
> vec_oprnds0
= vNULL
;
8564 vec
<tree
> vec_oprnds1
= vNULL
;
8565 vec
<tree
> vec_oprnds2
= vNULL
;
8566 vec
<tree
> vec_oprnds3
= vNULL
;
8568 bool masked
= false;
8570 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
8573 vect_reduction_type reduction_type
8574 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
);
8575 if (reduction_type
== TREE_CODE_REDUCTION
)
8577 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
8580 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
8581 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
8585 /* FORNOW: not yet supported. */
8586 if (STMT_VINFO_LIVE_P (stmt_info
))
8588 if (dump_enabled_p ())
8589 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8590 "value used after loop.\n");
8595 /* Is vectorizable conditional operation? */
8596 if (!is_gimple_assign (stmt
))
8599 code
= gimple_assign_rhs_code (stmt
);
8601 if (code
!= COND_EXPR
)
8604 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
8605 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8610 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
8612 gcc_assert (ncopies
>= 1);
8613 if (reduc_index
&& ncopies
> 1)
8614 return false; /* FORNOW */
8616 cond_expr
= gimple_assign_rhs1 (stmt
);
8617 then_clause
= gimple_assign_rhs2 (stmt
);
8618 else_clause
= gimple_assign_rhs3 (stmt
);
8620 if (!vect_is_simple_cond (cond_expr
, stmt_info
->vinfo
,
8621 &comp_vectype
, &dts
[0], vectype
)
8626 if (!vect_is_simple_use (then_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[2],
8629 if (!vect_is_simple_use (else_clause
, stmt_info
->vinfo
, &def_stmt
, &dts
[3],
8633 if (vectype1
&& !useless_type_conversion_p (vectype
, vectype1
))
8636 if (vectype2
&& !useless_type_conversion_p (vectype
, vectype2
))
8639 masked
= !COMPARISON_CLASS_P (cond_expr
);
8640 vec_cmp_type
= build_same_sized_truth_vector_type (comp_vectype
);
8642 if (vec_cmp_type
== NULL_TREE
)
8645 cond_code
= TREE_CODE (cond_expr
);
8648 cond_expr0
= TREE_OPERAND (cond_expr
, 0);
8649 cond_expr1
= TREE_OPERAND (cond_expr
, 1);
8652 if (!masked
&& VECTOR_BOOLEAN_TYPE_P (comp_vectype
))
      /* Boolean values may have another representation in vectors
	 and therefore we prefer bit operations over comparison for
	 them (which also works for scalar masks).  We store opcodes
	 to use in bitop1 and bitop2.  Statement is vectorized as
	 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
	 depending on bitop1 and bitop2 arity.  */
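      /* For single-bit boolean lanes the rewrites chosen below amount to
	   a >  b  ->  a & ~b	    a <  b  ->  b & ~a
	   a >= b  ->  a | ~b	    a <= b  ->  b | ~a
	   a != b  ->  a ^ b	    a == b  ->  ~(a ^ b)
	 A minimal illustrative sketch of one case, on mask words rather
	 than vector types (hypothetical helper, kept out of the build):  */
#if 0
static unsigned int
mask_cmp_gt_sketch (unsigned int a, unsigned int b)
{
  return a & ~b;	/* bitop1 = BIT_NOT_EXPR, bitop2 = BIT_AND_EXPR.  */
}
#endif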
8663 bitop1
= BIT_NOT_EXPR
;
8664 bitop2
= BIT_AND_EXPR
;
8667 bitop1
= BIT_NOT_EXPR
;
8668 bitop2
= BIT_IOR_EXPR
;
8671 bitop1
= BIT_NOT_EXPR
;
8672 bitop2
= BIT_AND_EXPR
;
8673 std::swap (cond_expr0
, cond_expr1
);
8676 bitop1
= BIT_NOT_EXPR
;
8677 bitop2
= BIT_IOR_EXPR
;
8678 std::swap (cond_expr0
, cond_expr1
);
8681 bitop1
= BIT_XOR_EXPR
;
8684 bitop1
= BIT_XOR_EXPR
;
8685 bitop2
= BIT_NOT_EXPR
;
8690 cond_code
= SSA_NAME
;
8695 STMT_VINFO_TYPE (stmt_info
) = condition_vec_info_type
;
8696 if (bitop1
!= NOP_EXPR
)
8698 machine_mode mode
= TYPE_MODE (comp_vectype
);
8701 optab
= optab_for_tree_code (bitop1
, comp_vectype
, optab_default
);
8702 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8705 if (bitop2
!= NOP_EXPR
)
8707 optab
= optab_for_tree_code (bitop2
, comp_vectype
,
8709 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8713 if (expand_vec_cond_expr_p (vectype
, comp_vectype
,
8717 vect_model_simple_cost (stmt_info
, ncopies
, dts
, ndts
, NULL
, NULL
);
8727 vec_oprnds0
.create (1);
8728 vec_oprnds1
.create (1);
8729 vec_oprnds2
.create (1);
8730 vec_oprnds3
.create (1);
8734 scalar_dest
= gimple_assign_lhs (stmt
);
8735 if (reduction_type
!= EXTRACT_LAST_REDUCTION
)
8736 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8738 /* Handle cond expr. */
8739 for (j
= 0; j
< ncopies
; j
++)
8741 gimple
*new_stmt
= NULL
;
8746 auto_vec
<tree
, 4> ops
;
8747 auto_vec
<vec
<tree
>, 4> vec_defs
;
8750 ops
.safe_push (cond_expr
);
8753 ops
.safe_push (cond_expr0
);
8754 ops
.safe_push (cond_expr1
);
8756 ops
.safe_push (then_clause
);
8757 ops
.safe_push (else_clause
);
8758 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
8759 vec_oprnds3
= vec_defs
.pop ();
8760 vec_oprnds2
= vec_defs
.pop ();
8762 vec_oprnds1
= vec_defs
.pop ();
8763 vec_oprnds0
= vec_defs
.pop ();
8771 = vect_get_vec_def_for_operand (cond_expr
, stmt
,
8773 vect_is_simple_use (cond_expr
, stmt_info
->vinfo
,
8779 = vect_get_vec_def_for_operand (cond_expr0
,
8780 stmt
, comp_vectype
);
8781 vect_is_simple_use (cond_expr0
, loop_vinfo
, >emp
, &dts
[0]);
8784 = vect_get_vec_def_for_operand (cond_expr1
,
8785 stmt
, comp_vectype
);
8786 vect_is_simple_use (cond_expr1
, loop_vinfo
, >emp
, &dts
[1]);
8788 if (reduc_index
== 1)
8789 vec_then_clause
= reduc_def
;
8792 vec_then_clause
= vect_get_vec_def_for_operand (then_clause
,
8794 vect_is_simple_use (then_clause
, loop_vinfo
,
8797 if (reduc_index
== 2)
8798 vec_else_clause
= reduc_def
;
8801 vec_else_clause
= vect_get_vec_def_for_operand (else_clause
,
8803 vect_is_simple_use (else_clause
, loop_vinfo
, >emp
, &dts
[3]);
8810 = vect_get_vec_def_for_stmt_copy (dts
[0],
8811 vec_oprnds0
.pop ());
8814 = vect_get_vec_def_for_stmt_copy (dts
[1],
8815 vec_oprnds1
.pop ());
8817 vec_then_clause
= vect_get_vec_def_for_stmt_copy (dts
[2],
8818 vec_oprnds2
.pop ());
8819 vec_else_clause
= vect_get_vec_def_for_stmt_copy (dts
[3],
8820 vec_oprnds3
.pop ());
8825 vec_oprnds0
.quick_push (vec_cond_lhs
);
8827 vec_oprnds1
.quick_push (vec_cond_rhs
);
8828 vec_oprnds2
.quick_push (vec_then_clause
);
8829 vec_oprnds3
.quick_push (vec_else_clause
);
8832 /* Arguments are ready. Create the new vector stmt. */
8833 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_cond_lhs
)
8835 vec_then_clause
= vec_oprnds2
[i
];
8836 vec_else_clause
= vec_oprnds3
[i
];
8839 vec_compare
= vec_cond_lhs
;
8842 vec_cond_rhs
= vec_oprnds1
[i
];
8843 if (bitop1
== NOP_EXPR
)
8844 vec_compare
= build2 (cond_code
, vec_cmp_type
,
8845 vec_cond_lhs
, vec_cond_rhs
);
8848 new_temp
= make_ssa_name (vec_cmp_type
);
8849 if (bitop1
== BIT_NOT_EXPR
)
8850 new_stmt
= gimple_build_assign (new_temp
, bitop1
,
8854 = gimple_build_assign (new_temp
, bitop1
, vec_cond_lhs
,
8856 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8857 if (bitop2
== NOP_EXPR
)
8858 vec_compare
= new_temp
;
8859 else if (bitop2
== BIT_NOT_EXPR
)
8861 /* Instead of doing ~x ? y : z do x ? z : y. */
8862 vec_compare
= new_temp
;
8863 std::swap (vec_then_clause
, vec_else_clause
);
8867 vec_compare
= make_ssa_name (vec_cmp_type
);
8869 = gimple_build_assign (vec_compare
, bitop2
,
8870 vec_cond_lhs
, new_temp
);
8871 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8875 if (reduction_type
== EXTRACT_LAST_REDUCTION
)
8877 if (!is_gimple_val (vec_compare
))
8879 tree vec_compare_name
= make_ssa_name (vec_cmp_type
);
8880 new_stmt
= gimple_build_assign (vec_compare_name
,
8882 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8883 vec_compare
= vec_compare_name
;
8885 gcc_assert (reduc_index
== 2);
8886 new_stmt
= gimple_build_call_internal
8887 (IFN_FOLD_EXTRACT_LAST
, 3, else_clause
, vec_compare
,
8889 gimple_call_set_lhs (new_stmt
, scalar_dest
);
8890 SSA_NAME_DEF_STMT (scalar_dest
) = new_stmt
;
8891 if (stmt
== gsi_stmt (*gsi
))
8892 vect_finish_replace_stmt (stmt
, new_stmt
);
8895 /* In this case we're moving the definition to later in the
8896 block. That doesn't matter because the only uses of the
8897 lhs are in phi statements. */
8898 gimple_stmt_iterator old_gsi
= gsi_for_stmt (stmt
);
8899 gsi_remove (&old_gsi
, true);
8900 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8905 new_temp
= make_ssa_name (vec_dest
);
8906 new_stmt
= gimple_build_assign (new_temp
, VEC_COND_EXPR
,
8907 vec_compare
, vec_then_clause
,
8909 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
8912 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
8919 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
8921 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
8923 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
8926 vec_oprnds0
.release ();
8927 vec_oprnds1
.release ();
8928 vec_oprnds2
.release ();
8929 vec_oprnds3
.release ();
/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
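/* Illustrative sketch only (hypothetical names, not GCC API): a vectorized
   comparison produces a boolean mask vector with one lane per scalar
   comparison; later consumers (VEC_COND_EXPR, masked loads/stores) use the
   mask directly.  Kept out of the build.  */
#if 0
static void
vec_cmp_sketch (const int *a, const int *b, unsigned char mask[4])
{
  for (int lane = 0; lane < 4; lane++)
    mask[lane] = a[lane] < b[lane] ? 0xff : 0;	/* all-ones means true.  */
}
#endif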
8943 vectorizable_comparison (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
8944 gimple
**vec_stmt
, tree reduc_def
,
8947 tree lhs
, rhs1
, rhs2
;
8948 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
8949 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8950 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
8951 tree vec_rhs1
= NULL_TREE
, vec_rhs2
= NULL_TREE
;
8953 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
8954 enum vect_def_type dts
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
8958 enum tree_code code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
8959 stmt_vec_info prev_stmt_info
= NULL
;
8961 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
8962 vec
<tree
> vec_oprnds0
= vNULL
;
8963 vec
<tree
> vec_oprnds1
= vNULL
;
8968 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
8971 if (!vectype
|| !VECTOR_BOOLEAN_TYPE_P (vectype
))
8974 mask_type
= vectype
;
8975 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
8980 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
8982 gcc_assert (ncopies
>= 1);
8983 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
8984 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
8988 if (STMT_VINFO_LIVE_P (stmt_info
))
8990 if (dump_enabled_p ())
8991 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8992 "value used after loop.\n");
8996 if (!is_gimple_assign (stmt
))
8999 code
= gimple_assign_rhs_code (stmt
);
9001 if (TREE_CODE_CLASS (code
) != tcc_comparison
)
9004 rhs1
= gimple_assign_rhs1 (stmt
);
9005 rhs2
= gimple_assign_rhs2 (stmt
);
9007 if (!vect_is_simple_use (rhs1
, stmt_info
->vinfo
, &def_stmt
,
9008 &dts
[0], &vectype1
))
9011 if (!vect_is_simple_use (rhs2
, stmt_info
->vinfo
, &def_stmt
,
9012 &dts
[1], &vectype2
))
9015 if (vectype1
&& vectype2
9016 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1
),
9017 TYPE_VECTOR_SUBPARTS (vectype2
)))
9020 vectype
= vectype1
? vectype1
: vectype2
;
9022 /* Invariant comparison. */
9025 vectype
= get_vectype_for_scalar_type (TREE_TYPE (rhs1
));
9026 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype
), nunits
))
9029 else if (maybe_ne (nunits
, TYPE_VECTOR_SUBPARTS (vectype
)))
9032 /* Can't compare mask and non-mask types. */
9033 if (vectype1
&& vectype2
9034 && (VECTOR_BOOLEAN_TYPE_P (vectype1
) ^ VECTOR_BOOLEAN_TYPE_P (vectype2
)))
  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
9044 if (VECTOR_BOOLEAN_TYPE_P (vectype
))
9046 if (code
== GT_EXPR
)
9048 bitop1
= BIT_NOT_EXPR
;
9049 bitop2
= BIT_AND_EXPR
;
9051 else if (code
== GE_EXPR
)
9053 bitop1
= BIT_NOT_EXPR
;
9054 bitop2
= BIT_IOR_EXPR
;
9056 else if (code
== LT_EXPR
)
9058 bitop1
= BIT_NOT_EXPR
;
9059 bitop2
= BIT_AND_EXPR
;
9060 std::swap (rhs1
, rhs2
);
9061 std::swap (dts
[0], dts
[1]);
9063 else if (code
== LE_EXPR
)
9065 bitop1
= BIT_NOT_EXPR
;
9066 bitop2
= BIT_IOR_EXPR
;
9067 std::swap (rhs1
, rhs2
);
9068 std::swap (dts
[0], dts
[1]);
9072 bitop1
= BIT_XOR_EXPR
;
9073 if (code
== EQ_EXPR
)
9074 bitop2
= BIT_NOT_EXPR
;
9080 STMT_VINFO_TYPE (stmt_info
) = comparison_vec_info_type
;
9082 vect_model_simple_cost (stmt_info
, ncopies
* (1 + (bitop2
!= NOP_EXPR
)),
9083 dts
, ndts
, NULL
, NULL
);
9084 if (bitop1
== NOP_EXPR
)
9085 return expand_vec_cmp_expr_p (vectype
, mask_type
, code
);
9088 machine_mode mode
= TYPE_MODE (vectype
);
9091 optab
= optab_for_tree_code (bitop1
, vectype
, optab_default
);
9092 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
9095 if (bitop2
!= NOP_EXPR
)
9097 optab
= optab_for_tree_code (bitop2
, vectype
, optab_default
);
9098 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
9108 vec_oprnds0
.create (1);
9109 vec_oprnds1
.create (1);
9113 lhs
= gimple_assign_lhs (stmt
);
9114 mask
= vect_create_destination_var (lhs
, mask_type
);
9116 /* Handle cmp expr. */
9117 for (j
= 0; j
< ncopies
; j
++)
9119 gassign
*new_stmt
= NULL
;
9124 auto_vec
<tree
, 2> ops
;
9125 auto_vec
<vec
<tree
>, 2> vec_defs
;
9127 ops
.safe_push (rhs1
);
9128 ops
.safe_push (rhs2
);
9129 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
9130 vec_oprnds1
= vec_defs
.pop ();
9131 vec_oprnds0
= vec_defs
.pop ();
9135 vec_rhs1
= vect_get_vec_def_for_operand (rhs1
, stmt
, vectype
);
9136 vec_rhs2
= vect_get_vec_def_for_operand (rhs2
, stmt
, vectype
);
9141 vec_rhs1
= vect_get_vec_def_for_stmt_copy (dts
[0],
9142 vec_oprnds0
.pop ());
9143 vec_rhs2
= vect_get_vec_def_for_stmt_copy (dts
[1],
9144 vec_oprnds1
.pop ());
9149 vec_oprnds0
.quick_push (vec_rhs1
);
9150 vec_oprnds1
.quick_push (vec_rhs2
);
9153 /* Arguments are ready. Create the new vector stmt. */
9154 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_rhs1
)
9156 vec_rhs2
= vec_oprnds1
[i
];
9158 new_temp
= make_ssa_name (mask
);
9159 if (bitop1
== NOP_EXPR
)
9161 new_stmt
= gimple_build_assign (new_temp
, code
,
9162 vec_rhs1
, vec_rhs2
);
9163 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
9167 if (bitop1
== BIT_NOT_EXPR
)
9168 new_stmt
= gimple_build_assign (new_temp
, bitop1
, vec_rhs2
);
9170 new_stmt
= gimple_build_assign (new_temp
, bitop1
, vec_rhs1
,
9172 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
9173 if (bitop2
!= NOP_EXPR
)
9175 tree res
= make_ssa_name (mask
);
9176 if (bitop2
== BIT_NOT_EXPR
)
9177 new_stmt
= gimple_build_assign (res
, bitop2
, new_temp
);
9179 new_stmt
= gimple_build_assign (res
, bitop2
, vec_rhs1
,
9181 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
9185 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
9192 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
9194 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
9196 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
9199 vec_oprnds0
.release ();
9200 vec_oprnds1
.release ();
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */
9211 can_vectorize_live_stmts (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
9212 slp_tree slp_node
, gimple
**vec_stmt
)
9218 FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node
), i
, slp_stmt
)
9220 stmt_vec_info slp_stmt_info
= vinfo_for_stmt (slp_stmt
);
9221 if (STMT_VINFO_LIVE_P (slp_stmt_info
)
9222 && !vectorizable_live_operation (slp_stmt
, gsi
, slp_node
, i
,
9227 else if (STMT_VINFO_LIVE_P (vinfo_for_stmt (stmt
))
9228 && !vectorizable_live_operation (stmt
, gsi
, slp_node
, -1, vec_stmt
))
9234 /* Make sure the statement is vectorizable. */
9237 vect_analyze_stmt (gimple
*stmt
, bool *need_to_vectorize
, slp_tree node
,
9238 slp_instance node_instance
)
9240 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
9241 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
9242 enum vect_relevant relevance
= STMT_VINFO_RELEVANT (stmt_info
);
9244 gimple
*pattern_stmt
;
9245 gimple_seq pattern_def_seq
;
9247 if (dump_enabled_p ())
9249 dump_printf_loc (MSG_NOTE
, vect_location
, "==> examining statement: ");
9250 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
9253 if (gimple_has_volatile_ops (stmt
))
9255 if (dump_enabled_p ())
9256 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9257 "not vectorized: stmt has volatile operands\n");
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */
9276 pattern_stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
9277 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
9278 && !STMT_VINFO_LIVE_P (stmt_info
))
9280 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
9282 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
9283 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
9285 /* Analyze PATTERN_STMT instead of the original stmt. */
9286 stmt
= pattern_stmt
;
9287 stmt_info
= vinfo_for_stmt (pattern_stmt
);
9288 if (dump_enabled_p ())
9290 dump_printf_loc (MSG_NOTE
, vect_location
,
9291 "==> examining pattern statement: ");
9292 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
9297 if (dump_enabled_p ())
9298 dump_printf_loc (MSG_NOTE
, vect_location
, "irrelevant.\n");
9303 else if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
9306 && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt
))
9307 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt
))))
9309 /* Analyze PATTERN_STMT too. */
9310 if (dump_enabled_p ())
9312 dump_printf_loc (MSG_NOTE
, vect_location
,
9313 "==> examining pattern statement: ");
9314 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt
, 0);
9317 if (!vect_analyze_stmt (pattern_stmt
, need_to_vectorize
, node
,
9322 if (is_pattern_stmt_p (stmt_info
)
9324 && (pattern_def_seq
= STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
)))
9326 gimple_stmt_iterator si
;
9328 for (si
= gsi_start (pattern_def_seq
); !gsi_end_p (si
); gsi_next (&si
))
9330 gimple
*pattern_def_stmt
= gsi_stmt (si
);
9331 if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt
))
9332 || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt
)))
9334 /* Analyze def stmt of STMT if it's a pattern stmt. */
9335 if (dump_enabled_p ())
9337 dump_printf_loc (MSG_NOTE
, vect_location
,
9338 "==> examining pattern def statement: ");
9339 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, pattern_def_stmt
, 0);
9342 if (!vect_analyze_stmt (pattern_def_stmt
,
9343 need_to_vectorize
, node
, node_instance
))
9349 switch (STMT_VINFO_DEF_TYPE (stmt_info
))
9351 case vect_internal_def
:
9354 case vect_reduction_def
:
9355 case vect_nested_cycle
:
9356 gcc_assert (!bb_vinfo
9357 && (relevance
== vect_used_in_outer
9358 || relevance
== vect_used_in_outer_by_reduction
9359 || relevance
== vect_used_by_reduction
9360 || relevance
== vect_unused_in_scope
9361 || relevance
== vect_used_only_live
));
9364 case vect_induction_def
:
9365 gcc_assert (!bb_vinfo
);
9368 case vect_constant_def
:
9369 case vect_external_def
:
9370 case vect_unknown_def_type
:
9375 if (STMT_VINFO_RELEVANT_P (stmt_info
))
9377 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt
))));
9378 gcc_assert (STMT_VINFO_VECTYPE (stmt_info
)
9379 || (is_gimple_call (stmt
)
9380 && gimple_call_lhs (stmt
) == NULL_TREE
));
9381 *need_to_vectorize
= true;
9384 if (PURE_SLP_STMT (stmt_info
) && !node
)
9386 dump_printf_loc (MSG_NOTE
, vect_location
,
9387 "handled only by SLP analysis\n");
9393 && (STMT_VINFO_RELEVANT_P (stmt_info
)
9394 || STMT_VINFO_DEF_TYPE (stmt_info
) == vect_reduction_def
))
9395 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, node
)
9396 || vectorizable_conversion (stmt
, NULL
, NULL
, node
)
9397 || vectorizable_shift (stmt
, NULL
, NULL
, node
)
9398 || vectorizable_operation (stmt
, NULL
, NULL
, node
)
9399 || vectorizable_assignment (stmt
, NULL
, NULL
, node
)
9400 || vectorizable_load (stmt
, NULL
, NULL
, node
, NULL
)
9401 || vectorizable_call (stmt
, NULL
, NULL
, node
)
9402 || vectorizable_store (stmt
, NULL
, NULL
, node
)
9403 || vectorizable_reduction (stmt
, NULL
, NULL
, node
, node_instance
)
9404 || vectorizable_induction (stmt
, NULL
, NULL
, node
)
9405 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, node
)
9406 || vectorizable_comparison (stmt
, NULL
, NULL
, NULL
, node
));
9410 ok
= (vectorizable_simd_clone_call (stmt
, NULL
, NULL
, node
)
9411 || vectorizable_conversion (stmt
, NULL
, NULL
, node
)
9412 || vectorizable_shift (stmt
, NULL
, NULL
, node
)
9413 || vectorizable_operation (stmt
, NULL
, NULL
, node
)
9414 || vectorizable_assignment (stmt
, NULL
, NULL
, node
)
9415 || vectorizable_load (stmt
, NULL
, NULL
, node
, NULL
)
9416 || vectorizable_call (stmt
, NULL
, NULL
, node
)
9417 || vectorizable_store (stmt
, NULL
, NULL
, node
)
9418 || vectorizable_condition (stmt
, NULL
, NULL
, NULL
, 0, node
)
9419 || vectorizable_comparison (stmt
, NULL
, NULL
, NULL
, node
));
9424 if (dump_enabled_p ())
9426 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9427 "not vectorized: relevant stmt not ");
9428 dump_printf (MSG_MISSED_OPTIMIZATION
, "supported: ");
9429 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
9438 /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
9439 need extra handling, except for vectorizable reductions. */
9440 if (STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
9441 && !can_vectorize_live_stmts (stmt
, NULL
, node
, NULL
))
9443 if (dump_enabled_p ())
9445 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9446 "not vectorized: live stmt not supported: ");
9447 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, stmt
, 0);
9457 /* Function vect_transform_stmt.
9459 Create a vectorized stmt to replace STMT, and insert it at BSI. */
9462 vect_transform_stmt (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
9463 bool *grouped_store
, slp_tree slp_node
,
9464 slp_instance slp_node_instance
)
9466 bool is_store
= false;
9467 gimple
*vec_stmt
= NULL
;
9468 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
9471 gcc_assert (slp_node
|| !PURE_SLP_STMT (stmt_info
));
9472 gimple
*old_vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
9474 bool nested_p
= (STMT_VINFO_LOOP_VINFO (stmt_info
)
9475 && nested_in_vect_loop_p
9476 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info
)),
9479 switch (STMT_VINFO_TYPE (stmt_info
))
9481 case type_demotion_vec_info_type
:
9482 case type_promotion_vec_info_type
:
9483 case type_conversion_vec_info_type
:
9484 done
= vectorizable_conversion (stmt
, gsi
, &vec_stmt
, slp_node
);
9488 case induc_vec_info_type
:
9489 done
= vectorizable_induction (stmt
, gsi
, &vec_stmt
, slp_node
);
9493 case shift_vec_info_type
:
9494 done
= vectorizable_shift (stmt
, gsi
, &vec_stmt
, slp_node
);
9498 case op_vec_info_type
:
9499 done
= vectorizable_operation (stmt
, gsi
, &vec_stmt
, slp_node
);
9503 case assignment_vec_info_type
:
9504 done
= vectorizable_assignment (stmt
, gsi
, &vec_stmt
, slp_node
);
9508 case load_vec_info_type
:
9509 done
= vectorizable_load (stmt
, gsi
, &vec_stmt
, slp_node
,
9514 case store_vec_info_type
:
9515 done
= vectorizable_store (stmt
, gsi
, &vec_stmt
, slp_node
);
9517 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
) && !slp_node
)
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  stmt_vec_info group_info
	    = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
	  if (GROUP_STORE_COUNT (group_info) == GROUP_SIZE (group_info))
9533 case condition_vec_info_type
:
9534 done
= vectorizable_condition (stmt
, gsi
, &vec_stmt
, NULL
, 0, slp_node
);
9538 case comparison_vec_info_type
:
9539 done
= vectorizable_comparison (stmt
, gsi
, &vec_stmt
, NULL
, slp_node
);
9543 case call_vec_info_type
:
9544 done
= vectorizable_call (stmt
, gsi
, &vec_stmt
, slp_node
);
9545 stmt
= gsi_stmt (*gsi
);
9548 case call_simd_clone_vec_info_type
:
9549 done
= vectorizable_simd_clone_call (stmt
, gsi
, &vec_stmt
, slp_node
);
9550 stmt
= gsi_stmt (*gsi
);
9553 case reduc_vec_info_type
:
9554 done
= vectorizable_reduction (stmt
, gsi
, &vec_stmt
, slp_node
,
9560 if (!STMT_VINFO_LIVE_P (stmt_info
))
9562 if (dump_enabled_p ())
9563 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9564 "stmt not supported.\n");
9569 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9570 This would break hybrid SLP vectorization. */
9572 gcc_assert (!vec_stmt
9573 && STMT_VINFO_VEC_STMT (stmt_info
) == old_vec_stmt
);
9575 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9576 is being vectorized, but outside the immediately enclosing loop. */
9579 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
9580 && (STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_outer
9581 || STMT_VINFO_RELEVANT (stmt_info
) ==
9582 vect_used_in_outer_by_reduction
))
9584 struct loop
*innerloop
= LOOP_VINFO_LOOP (
9585 STMT_VINFO_LOOP_VINFO (stmt_info
))->inner
;
9586 imm_use_iterator imm_iter
;
9587 use_operand_p use_p
;
9591 if (dump_enabled_p ())
9592 dump_printf_loc (MSG_NOTE
, vect_location
,
9593 "Record the vdef for outer-loop vectorization.\n");
      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
9598 if (gimple_code (stmt
) == GIMPLE_PHI
)
9599 scalar_dest
= PHI_RESULT (stmt
);
9601 scalar_dest
= gimple_assign_lhs (stmt
);
9603 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
9605 if (!flow_bb_inside_loop_p (innerloop
, gimple_bb (USE_STMT (use_p
))))
9607 exit_phi
= USE_STMT (use_p
);
9608 STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi
)) = vec_stmt
;
9613 /* Handle stmts whose DEF is used outside the loop-nest that is
9614 being vectorized. */
9615 if (STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
9617 done
= can_vectorize_live_stmts (stmt
, gsi
, slp_node
, &vec_stmt
);
9622 STMT_VINFO_VEC_STMT (stmt_info
) = vec_stmt
;
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */
9632 vect_remove_stores (gimple
*first_stmt
)
9634 gimple
*next
= first_stmt
;
9636 gimple_stmt_iterator next_si
;
9640 stmt_vec_info stmt_info
= vinfo_for_stmt (next
);
9642 tmp
= GROUP_NEXT_ELEMENT (stmt_info
);
9643 if (is_pattern_stmt_p (stmt_info
))
9644 next
= STMT_VINFO_RELATED_STMT (stmt_info
);
9645 /* Free the attached stmt_vec_info and remove the stmt. */
9646 next_si
= gsi_for_stmt (next
);
9647 unlink_stmt_vdef (next
);
9648 gsi_remove (&next_si
, true);
9649 release_defs (next
);
9650 free_stmt_vec_info (next
);
9656 /* Function new_stmt_vec_info.
9658 Create and initialize a new stmt_vec_info struct for STMT. */
9661 new_stmt_vec_info (gimple
*stmt
, vec_info
*vinfo
)
9664 res
= (stmt_vec_info
) xcalloc (1, sizeof (struct _stmt_vec_info
));
9666 STMT_VINFO_TYPE (res
) = undef_vec_info_type
;
9667 STMT_VINFO_STMT (res
) = stmt
;
9669 STMT_VINFO_RELEVANT (res
) = vect_unused_in_scope
;
9670 STMT_VINFO_LIVE_P (res
) = false;
9671 STMT_VINFO_VECTYPE (res
) = NULL
;
9672 STMT_VINFO_VEC_STMT (res
) = NULL
;
9673 STMT_VINFO_VECTORIZABLE (res
) = true;
9674 STMT_VINFO_IN_PATTERN_P (res
) = false;
9675 STMT_VINFO_RELATED_STMT (res
) = NULL
;
9676 STMT_VINFO_PATTERN_DEF_SEQ (res
) = NULL
;
9677 STMT_VINFO_DATA_REF (res
) = NULL
;
9678 STMT_VINFO_VEC_REDUCTION_TYPE (res
) = TREE_CODE_REDUCTION
;
9679 STMT_VINFO_VEC_CONST_COND_REDUC_CODE (res
) = ERROR_MARK
;
9681 if (gimple_code (stmt
) == GIMPLE_PHI
9682 && is_loop_header_bb_p (gimple_bb (stmt
)))
9683 STMT_VINFO_DEF_TYPE (res
) = vect_unknown_def_type
;
9685 STMT_VINFO_DEF_TYPE (res
) = vect_internal_def
;
9687 STMT_VINFO_SAME_ALIGN_REFS (res
).create (0);
9688 STMT_SLP_TYPE (res
) = loop_vect
;
9689 STMT_VINFO_NUM_SLP_USES (res
) = 0;
9691 GROUP_FIRST_ELEMENT (res
) = NULL
;
9692 GROUP_NEXT_ELEMENT (res
) = NULL
;
9693 GROUP_SIZE (res
) = 0;
9694 GROUP_STORE_COUNT (res
) = 0;
9695 GROUP_GAP (res
) = 0;
9696 GROUP_SAME_DR_STMT (res
) = NULL
;
9702 /* Create a hash table for stmt_vec_info. */
9705 init_stmt_vec_info_vec (void)
9707 gcc_assert (!stmt_vec_info_vec
.exists ());
9708 stmt_vec_info_vec
.create (50);
9712 /* Free hash table for stmt_vec_info. */
9715 free_stmt_vec_info_vec (void)
9719 FOR_EACH_VEC_ELT (stmt_vec_info_vec
, i
, info
)
9721 free_stmt_vec_info (STMT_VINFO_STMT (info
));
9722 gcc_assert (stmt_vec_info_vec
.exists ());
9723 stmt_vec_info_vec
.release ();
9727 /* Free stmt vectorization related info. */
9730 free_stmt_vec_info (gimple
*stmt
)
9732 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
9737 /* Check if this statement has a related "pattern stmt"
9738 (introduced by the vectorizer during the pattern recognition
9739 pass). Free pattern's stmt_vec_info and def stmt's stmt_vec_info
9741 if (STMT_VINFO_IN_PATTERN_P (stmt_info
))
9743 stmt_vec_info patt_info
9744 = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
9747 gimple_seq seq
= STMT_VINFO_PATTERN_DEF_SEQ (patt_info
);
9748 gimple
*patt_stmt
= STMT_VINFO_STMT (patt_info
);
9749 gimple_set_bb (patt_stmt
, NULL
);
9750 tree lhs
= gimple_get_lhs (patt_stmt
);
9751 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
)
9752 release_ssa_name (lhs
);
9755 gimple_stmt_iterator si
;
9756 for (si
= gsi_start (seq
); !gsi_end_p (si
); gsi_next (&si
))
9758 gimple
*seq_stmt
= gsi_stmt (si
);
9759 gimple_set_bb (seq_stmt
, NULL
);
9760 lhs
= gimple_get_lhs (seq_stmt
);
9761 if (lhs
&& TREE_CODE (lhs
) == SSA_NAME
)
9762 release_ssa_name (lhs
);
9763 free_stmt_vec_info (seq_stmt
);
9766 free_stmt_vec_info (patt_stmt
);
9770 STMT_VINFO_SAME_ALIGN_REFS (stmt_info
).release ();
9771 STMT_VINFO_SIMD_CLONE_INFO (stmt_info
).release ();
9772 set_vinfo_for_stmt (stmt
, NULL
);
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */
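/* Illustrative arithmetic only (not the GCC implementation): the lane count
   of the resulting vector type is the requested size divided by the element
   size, e.g. a 16-byte vector of 4-byte ints has 4 lanes.  Kept out of the
   build.  */
#if 0
static unsigned int
vector_lanes_sketch (unsigned int vector_size_bytes,
		     unsigned int elem_size_bytes)
{
  if (elem_size_bytes == 0 || vector_size_bytes % elem_size_bytes != 0)
    return 0;					/* no usable vector type.  */
  return vector_size_bytes / elem_size_bytes;	/* e.g. 16 / 4 == 4.  */
}
#endif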
9783 get_vectype_for_scalar_type_and_size (tree scalar_type
, poly_uint64 size
)
9785 tree orig_scalar_type
= scalar_type
;
9786 scalar_mode inner_mode
;
9787 machine_mode simd_mode
;
9791 if (!is_int_mode (TYPE_MODE (scalar_type
), &inner_mode
)
9792 && !is_float_mode (TYPE_MODE (scalar_type
), &inner_mode
))
9795 unsigned int nbytes
= GET_MODE_SIZE (inner_mode
);
  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
9803 if (INTEGRAL_TYPE_P (scalar_type
)
9804 && (GET_MODE_BITSIZE (inner_mode
) != TYPE_PRECISION (scalar_type
)
9805 || TREE_CODE (scalar_type
) != INTEGER_TYPE
))
9806 scalar_type
= build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode
),
9807 TYPE_UNSIGNED (scalar_type
));
9809 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9810 When the component mode passes the above test simply use a type
9811 corresponding to that mode. The theory is that any use that
9812 would cause problems with this will disable vectorization anyway. */
9813 else if (!SCALAR_FLOAT_TYPE_P (scalar_type
)
9814 && !INTEGRAL_TYPE_P (scalar_type
))
9815 scalar_type
= lang_hooks
.types
.type_for_mode (inner_mode
, 1);
  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
9819 else if (nbytes
< TYPE_ALIGN_UNIT (scalar_type
))
9820 scalar_type
= lang_hooks
.types
.type_for_mode (inner_mode
,
9821 TYPE_UNSIGNED (scalar_type
));
  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
9825 if (scalar_type
== NULL_TREE
)
9828 /* If no size was supplied use the mode the target prefers. Otherwise
9829 lookup a vector mode of the specified size. */
9830 if (known_eq (size
, 0U))
9831 simd_mode
= targetm
.vectorize
.preferred_simd_mode (inner_mode
);
9832 else if (!multiple_p (size
, nbytes
, &nunits
)
9833 || !mode_for_vector (inner_mode
, nunits
).exists (&simd_mode
))
9835 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9836 if (!multiple_p (GET_MODE_SIZE (simd_mode
), nbytes
, &nunits
))
9839 vectype
= build_vector_type (scalar_type
, nunits
);
9841 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
9842 && !INTEGRAL_MODE_P (TYPE_MODE (vectype
)))
  /* Re-attach the address-space qualifier if we canonicalized the scalar
     type.  */
9847 if (TYPE_ADDR_SPACE (orig_scalar_type
) != TYPE_ADDR_SPACE (vectype
))
9848 return build_qualified_type
9849 (vectype
, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type
)));
9854 poly_uint64 current_vector_size
;
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */
9862 get_vectype_for_scalar_type (tree scalar_type
)
9865 vectype
= get_vectype_for_scalar_type_and_size (scalar_type
,
9866 current_vector_size
);
9868 && known_eq (current_vector_size
, 0U))
9869 current_vector_size
= GET_MODE_SIZE (TYPE_MODE (vectype
));
9873 /* Function get_mask_type_for_scalar_type.
9875 Returns the mask type corresponding to a result of comparison
9876 of vectors of specified SCALAR_TYPE as supported by target. */
9879 get_mask_type_for_scalar_type (tree scalar_type
)
9881 tree vectype
= get_vectype_for_scalar_type (scalar_type
);
9886 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype
),
9887 current_vector_size
);
9890 /* Function get_same_sized_vectype
9892 Returns a vector type corresponding to SCALAR_TYPE of size
9893 VECTOR_TYPE if supported by the target. */
9896 get_same_sized_vectype (tree scalar_type
, tree vector_type
)
9898 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type
))
9899 return build_same_sized_truth_vector_type (vector_type
);
9901 return get_vectype_for_scalar_type_and_size
9902 (scalar_type
, GET_MODE_SIZE (TYPE_MODE (vector_type
)));
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.

   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
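/* Illustrative example only (hypothetical code, not GCC API): in the loop
   below the constant 1 is a vect_constant_def, X (defined before the loop)
   is a vect_external_def, and the load of B[i] is a vect_internal_def; all
   three are simple uses.  A value carried over from a previous iteration
   would not be.  Kept out of the build.  */
#if 0
static void
simple_use_example (int *a, const int *b, int x, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] * x + 1;
}
#endif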
9923 vect_is_simple_use (tree operand
, vec_info
*vinfo
,
9924 gimple
**def_stmt
, enum vect_def_type
*dt
)
9927 *dt
= vect_unknown_def_type
;
9929 if (dump_enabled_p ())
9931 dump_printf_loc (MSG_NOTE
, vect_location
,
9932 "vect_is_simple_use: operand ");
9933 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, operand
);
9934 dump_printf (MSG_NOTE
, "\n");
9937 if (CONSTANT_CLASS_P (operand
))
9939 *dt
= vect_constant_def
;
9943 if (is_gimple_min_invariant (operand
))
9945 *dt
= vect_external_def
;
9949 if (TREE_CODE (operand
) != SSA_NAME
)
9951 if (dump_enabled_p ())
9952 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9957 if (SSA_NAME_IS_DEFAULT_DEF (operand
))
9959 *dt
= vect_external_def
;
9963 *def_stmt
= SSA_NAME_DEF_STMT (operand
);
9964 if (dump_enabled_p ())
9966 dump_printf_loc (MSG_NOTE
, vect_location
, "def_stmt: ");
9967 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, *def_stmt
, 0);
9970 if (! vect_stmt_in_region_p (vinfo
, *def_stmt
))
9971 *dt
= vect_external_def
;
9974 stmt_vec_info stmt_vinfo
= vinfo_for_stmt (*def_stmt
);
9975 *dt
= STMT_VINFO_DEF_TYPE (stmt_vinfo
);
9978 if (dump_enabled_p ())
9980 dump_printf_loc (MSG_NOTE
, vect_location
, "type of def: ");
9983 case vect_uninitialized_def
:
9984 dump_printf (MSG_NOTE
, "uninitialized\n");
9986 case vect_constant_def
:
9987 dump_printf (MSG_NOTE
, "constant\n");
9989 case vect_external_def
:
9990 dump_printf (MSG_NOTE
, "external\n");
9992 case vect_internal_def
:
9993 dump_printf (MSG_NOTE
, "internal\n");
9995 case vect_induction_def
:
9996 dump_printf (MSG_NOTE
, "induction\n");
9998 case vect_reduction_def
:
9999 dump_printf (MSG_NOTE
, "reduction\n");
10001 case vect_double_reduction_def
:
10002 dump_printf (MSG_NOTE
, "double reduction\n");
10004 case vect_nested_cycle
:
10005 dump_printf (MSG_NOTE
, "nested cycle\n");
10007 case vect_unknown_def_type
:
10008 dump_printf (MSG_NOTE
, "unknown\n");
10013 if (*dt
== vect_unknown_def_type
)
10015 if (dump_enabled_p ())
10016 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
10017 "Unsupported pattern.\n");
10021 switch (gimple_code (*def_stmt
))
10024 case GIMPLE_ASSIGN
:
10028 if (dump_enabled_p ())
10029 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
10030 "unsupported defining stmt:\n");
10037 /* Function vect_is_simple_use.
10039 Same as vect_is_simple_use but also determines the vector operand
10040 type of OPERAND and stores it to *VECTYPE. If the definition of
10041 OPERAND is vect_uninitialized_def, vect_constant_def or
10042 vect_external_def *VECTYPE will be set to NULL_TREE and the caller
10043 is responsible to compute the best suited vector type for the
10047 vect_is_simple_use (tree operand
, vec_info
*vinfo
,
10048 gimple
**def_stmt
, enum vect_def_type
*dt
, tree
*vectype
)
10050 if (!vect_is_simple_use (operand
, vinfo
, def_stmt
, dt
))
10053 /* Now get a vector type if the def is internal, otherwise supply
10054 NULL_TREE and leave it up to the caller to figure out a proper
10055 type for the use stmt. */
10056 if (*dt
== vect_internal_def
10057 || *dt
== vect_induction_def
10058 || *dt
== vect_reduction_def
10059 || *dt
== vect_double_reduction_def
10060 || *dt
== vect_nested_cycle
)
10062 stmt_vec_info stmt_info
= vinfo_for_stmt (*def_stmt
);
10064 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
10065 && !STMT_VINFO_RELEVANT (stmt_info
)
10066 && !STMT_VINFO_LIVE_P (stmt_info
))
10067 stmt_info
= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info
));
10069 *vectype
= STMT_VINFO_VECTYPE (stmt_info
);
10070 gcc_assert (*vectype
!= NULL_TREE
);
10072 else if (*dt
== vect_uninitialized_def
10073 || *dt
== vect_constant_def
10074 || *dt
== vect_external_def
)
10075 *vectype
= NULL_TREE
;
10077 gcc_unreachable ();
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */
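/* Illustrative sketch only (hypothetical names, not GCC API): a char -> int
   conversion is a two-step widening, so MULTI_STEP_CVT would be 1 and the
   intermediate type would be short; each step corresponds to a pair of
   VEC_UNPACK_LO/HI operations that double the element width.  Kept out of
   the build.  */
#if 0
static void
widen_char_to_int_sketch (const signed char in[8], int out[8])
{
  short mid[8];
  for (int i = 0; i < 8; i++)
    mid[i] = in[i];		/* step 1: char -> short (unpack lo/hi).  */
  for (int i = 0; i < 8; i++)
    out[i] = mid[i];		/* step 2: short -> int (unpack lo/hi).  */
}
#endif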
bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd},
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
                         TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type = vect_halve_mask_nunits (prev_type);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode,
                                            TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
                             TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
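
/* Illustrative sketch (hypothetical caller, not actual vectorizer code):
   a pass that wants to widen chars to ints on a target with 128-bit vectors
   might query the function above roughly as follows; the variable names are
   made up, only the signature is assumed:

       enum tree_code code1, code2;
       int multi_step_cvt = 0;
       vec<tree> interm_types = vNULL;
       if (supportable_widening_operation (NOP_EXPR, stmt,
                                           v4si_vectype, v16qi_vectype,
                                           &code1, &code2, &multi_step_cvt,
                                           &interm_types))
         ...

   On such a target one would expect CODE1/CODE2 to be
   VEC_UNPACK_LO_EXPR/VEC_UNPACK_HI_EXPR, MULTI_STEP_CVT to be 1, and
   INTERM_TYPES to hold the V8HI vector type used for the
   char -> short -> int promotion described in the comment above.  */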
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */
bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
                         TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type = vect_double_mask_nunits (prev_type);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
                             TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
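
/* Illustrative sketch (hypothetical caller, not actual vectorizer code):
   narrowing is queried analogously, e.g. for an int -> char conversion on a
   target with 128-bit vectors; the variable names are made up, only the
   signature is assumed:

       enum tree_code code1;
       int multi_step_cvt = 0;
       vec<tree> interm_types = vNULL;
       if (supportable_narrowing_operation (NOP_EXPR,
                                            v16qi_vectype, v4si_vectype,
                                            &code1, &multi_step_cvt,
                                            &interm_types))
         ...

   On such a target one would expect CODE1 to be VEC_PACK_TRUNC_EXPR,
   MULTI_STEP_CVT to be 1, and INTERM_TYPES to hold the V8HI vector type
   used for the int -> short -> char demotion described in the comment
   above.  */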
/* Generate and return a statement that sets vector mask MASK such that
   MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I.  */

gcall *
vect_gen_while (tree mask, tree start_index, tree end_index)
{
  tree cmp_type = TREE_TYPE (start_index);
  tree mask_type = TREE_TYPE (mask);
  gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
                                                       cmp_type, mask_type,
                                                       OPTIMIZE_FOR_SPEED));
  gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
                                            start_index, end_index,
                                            build_zero_cst (mask_type));
  gimple_call_set_lhs (call, mask);
  return call;
}
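
/* Worked example of the semantics above (values chosen for illustration):
   with START_INDEX == 6, END_INDEX == 9 and an 8-element MASK, lane I is
   active iff I + 6 < 9, so the IFN_WHILE_ULT call sets MASK to
   {1, 1, 1, 0, 0, 0, 0, 0}; this is the shape used to disable the excess
   lanes of a final, partial vector iteration in a fully-masked loop.  */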
/* Generate a vector mask of type MASK_TYPE for which index I is false iff
   J + START_INDEX < END_INDEX for all J <= I.  Add the statements to SEQ.  */

tree
vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
                    tree end_index)
{
  tree tmp = make_ssa_name (mask_type);
  gcall *call = vect_gen_while (tmp, start_index, end_index);
  gimple_seq_add_stmt (seq, call);
  return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
}
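
/* Illustrative sketch (hypothetical caller): the inverted form is convenient
   when the complement of the active-lane mask is needed, e.g.:

       gimple_seq seq = NULL;
       tree inv_mask = vect_gen_while_not (&seq, mask_type,
                                           start_index, end_index);

   With the values from the example above, INV_MASK would be
   {0, 0, 0, 1, 1, 1, 1, 1}.  */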