/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-ssa-loop-niter.h"
#include "gimple-fold.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if ((kind == vector_load || kind == unaligned_load)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_gather_load;
  if ((kind == vector_store || kind == unaligned_store)
      && STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    kind = vector_scatter_store;

  stmt_info_for_cost si = { count, kind, where, stmt_info, misalign };
  body_cost_vec->safe_push (si);

  tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
  return (unsigned)
    (builtin_vectorization_cost (kind, vectype, misalign) * count);
}
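
/* A typical use, as in vect_model_simple_cost further down, records the
   cost of NCOPIES copies of a vector statement in the loop body:

     inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
                                      stmt_info, 0, vect_body);

   This is only an illustration of the calling convention; each caller in
   this file passes the cost kind and location appropriate to the statement
   being costed.  */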
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT_INFO and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                   tree scalar_dest, tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT_INFO.  */

static void
write_vector_array (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                    tree vect, tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, tree alias_ptr_type)
{
  tree mem_ref;

  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Add a clobber of variable VAR to the vectorization of STMT_INFO.
   Emit the clobber before *GSI.  */

static void
vect_clobber_variable (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
                       tree var)
{
  tree clobber = build_clobber (TREE_TYPE (var));
  gimple *new_stmt = gimple_build_assign (var, clobber);
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT_INFO as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<stmt_vec_info> *worklist, stmt_vec_info stmt_info,
                    enum vect_relevant relevant, bool live_p)
{
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "last stmt in pattern. don't mark"
                         " relevant/live.\n");
      stmt_vec_info old_stmt_info = stmt_info;
      stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == old_stmt_info);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt_info);
}
/* Function is_simple_and_all_uses_invariant

   Return true if STMT_INFO is simple and all uses of it are invariant.  */

bool
is_simple_and_all_uses_invariant (stmt_vec_info stmt_info,
                                  loop_vec_info loop_vinfo)
{
  tree op;
  ssa_op_iter iter;

  gassign *stmt = dyn_cast <gassign *> (stmt_info->stmt);
  if (!stmt)
    return false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
    {
      enum vect_def_type dt = vect_uninitialized_def;

      if (!vect_is_simple_use (op, loop_vinfo, &dt))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
          return false;
        }

      if (dt != vect_external_def && dt != vect_constant_def)
        return false;
    }
  return true;
}
/* Function vect_stmt_relevant_p.

   Return true if STMT_INFO, in the loop that is represented by LOOP_VINFO,
   is "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt_info->stmt)
      && STMT_VINFO_TYPE (stmt_info) != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt_info->stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt_info->stmt)
        && !gimple_clobber_p (stmt_info->stmt))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt_info->stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)   */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  if (*live_p && *relevant == vect_unused_in_scope
      && !is_simple_and_all_uses_invariant (stmt_info, loop_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vec_stmt_relevant_p: stmt live but not relevant.\n");
      *relevant = vect_used_only_live;
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT_INFO.  Check if USE is
   used in STMT_INFO for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, stmt_vec_info stmt_info)
{
  tree operand;

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it's of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
  if (!assign || !gimple_assign_copy_p (assign))
    {
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      if (call && gimple_call_internal_p (call))
        {
          internal_fn ifn = gimple_call_internal_fn (call);
          int mask_index = internal_fn_mask_index (ifn);
          if (mask_index >= 0
              && use == gimple_call_arg (call, mask_index))
            return true;
          int stored_value_index = internal_fn_stored_value_index (ifn);
          if (stored_value_index >= 0
              && use == gimple_call_arg (call, stored_value_index))
            return true;
          if (internal_gather_scatter_fn_p (ifn)
              && use == gimple_call_arg (call, 1))
            return true;
        }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (assign)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (assign);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT_VINFO in a loop represented by LOOP_VINFO
   - RELEVANT - enum value to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_stmt_vinfo) <-- live_p
       STMT_VINFO_RELEVANT (DEF_stmt_vinfo) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT_VINFO is a reduction phi and DEF_STMT is a reduction stmt,
   we skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT_VINFO are in different nests, then
   "relevant" will be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
             enum vect_relevant relevant, vec<stmt_vec_info> *worklist,
             bool force)
{
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!dstmt_vinfo)
    return true;

  def_bb = gimple_bb (dstmt_vinfo->stmt);

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DSTMT_VINFO).
     DSTMT_VINFO must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DSTMT_VINFO in the loop.  So we just
     check that everything is as expected, and we are done.  */
  bb = gimple_bb (stmt_vinfo->stmt);
  if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = dstmt_vinfo
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = dstmt_vinfo
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
        case vect_used_only_live:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }
  /* We are also not interested in uses on loop PHI backedges that are
     inductions.  Otherwise we'll needlessly vectorize the IV increment
     and cause hybrid SLP for SLP inductions.  Unless the PHI is live
     of course.  */
  else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
           && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
           && ! STMT_VINFO_LIVE_P (stmt_vinfo)
           && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
                                      loop_latch_edge (bb->loop_father))
               == use))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "induction value on backedge.\n");
      return true;
    }

  vect_mark_relevant (worklist, dstmt_vinfo, relevant, false);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

static bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  unsigned int i;
  basic_block bb;
  bool live_p;
  enum vect_relevant relevant;

  DUMP_VECT_SCOPE ("vect_mark_stmts_to_be_vectorized");

  auto_vec<stmt_vec_info, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info phi_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
            }

          if (vect_stmt_relevant_p (phi_info, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi_info, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
            }

          if (vect_stmt_relevant_p (stmt_info, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt_info, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt_vec_info stmt_vinfo = worklist.pop ();
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_vinfo->stmt, 0);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant according to the relevance property
         of STMT.  */
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);

      /* Generally, the relevance property of STMT (in STMT_VINFO_RELEVANT) is
         propagated as is to the DEF_STMTs of its USEs.

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the relevance to vect_used_by_reduction.
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */

      switch (STMT_VINFO_DEF_TYPE (stmt_vinfo))
        {
        case vect_reduction_def:
          gcc_assert (relevant != vect_unused_in_scope);
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of reduction.\n");
              return false;
            }
          break;

        case vect_nested_cycle:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_in_outer_by_reduction
              && relevant != vect_used_in_outer)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of nested cycle.\n");
              return false;
            }
          break;

        case vect_double_reduction_def:
          if (relevant != vect_unused_in_scope
              && relevant != vect_used_by_reduction
              && relevant != vect_used_only_live)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "unsupported use of double reduction.\n");
              return false;
            }
          break;

        default:
          break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (assign);
              tree op = gimple_assign_rhs1 (assign);

              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt_vinfo, TREE_OPERAND (op, 0),
                                    loop_vinfo, relevant, &worklist, false)
                      || !process_use (stmt_vinfo, TREE_OPERAND (op, 1),
                                       loop_vinfo, relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (assign); i++)
                {
                  op = gimple_op (assign, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt_vinfo, op, loop_vinfo, relevant,
                                       &worklist, false))
                    return false;
                }
            }
          else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
            {
              for (i = 0; i < gimple_call_num_args (call); i++)
                {
                  tree arg = gimple_call_arg (call, i);
                  if (!process_use (stmt_vinfo, arg, loop_vinfo, relevant,
                                    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt_vinfo, op, loop_vinfo, relevant,
                              &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          gather_scatter_info gs_info;
          if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
            gcc_unreachable ();
          if (!process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
                            &worklist, true))
            return false;
        }
    } /* while worklist */

  return true;
}
/* Compute the prologue cost for invariant or constant operands.  */

static unsigned
vect_prologue_cost_for_slp_op (slp_tree node, stmt_vec_info stmt_info,
                               unsigned opno, enum vect_def_type dt,
                               stmt_vector_for_cost *cost_vec)
{
  gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
  tree op = gimple_op (stmt, opno);
  unsigned prologue_cost = 0;

  /* Without looking at the actual initializer a vector of
     constants can be implemented as load from the constant pool.
     When all elements are the same we can use a splat.  */
  tree vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
  unsigned group_size = SLP_TREE_SCALAR_STMTS (node).length ();
  unsigned num_vects_to_check;
  unsigned HOST_WIDE_INT const_nunits;
  unsigned nelt_limit;
  if (TYPE_VECTOR_SUBPARTS (vectype).is_constant (&const_nunits)
      && ! multiple_p (const_nunits, group_size))
    {
      num_vects_to_check = SLP_TREE_NUMBER_OF_VEC_STMTS (node);
      nelt_limit = const_nunits;
    }
  else
    {
      /* If either the vector has variable length or the vectors
         are composed of repeated whole groups we only need to
         cost construction once.  All vectors will be the same.  */
      num_vects_to_check = 1;
      nelt_limit = group_size;
    }
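
  /* For example, with a group of 2 scalar stmts and 4-element vectors each
     vector repeats the whole group, so construction is costed only once;
     with a group of 3 and 4-element vectors the vectors differ and every
     SLP vector statement is costed separately.  */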
  tree elt = NULL_TREE;
  unsigned nelt = 0;
  for (unsigned j = 0; j < num_vects_to_check * nelt_limit; ++j)
    {
      unsigned si = j % group_size;
      if (nelt == 0)
        elt = gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt, opno);
      /* ??? We're just tracking whether all operands of a single
         vector initializer are the same, ideally we'd check if
         we emitted the same one already.  */
      else if (elt != gimple_op (SLP_TREE_SCALAR_STMTS (node)[si]->stmt,
                                 opno))
        elt = NULL_TREE;
      nelt++;
      if (nelt == nelt_limit)
        {
          /* ??? We need to pass down stmt_info for a vector type
             even if it points to the wrong stmt.  */
          prologue_cost += record_stmt_cost
             (cost_vec, 1,
              dt == vect_external_def
              ? (elt ? scalar_to_vec : vec_construct)
              : vector_load,
              stmt_info, 0, vect_prologue);
          nelt = 0;
        }
    }

  return prologue_cost;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        int ndts,
                        slp_tree node,
                        stmt_vector_for_cost *cost_vec)
{
  int inside_cost = 0, prologue_cost = 0;

  gcc_assert (cost_vec != NULL);

  /* ??? Somehow we need to fix this at the callers.  */
  if (node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (node);

  if (node)
    {
      /* Scan operands and account for prologue cost of constants/externals.
         ??? This over-estimates cost for multiple uses and should be
         re-engineered.  */
      gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0]->stmt;
      tree lhs = gimple_get_lhs (stmt);
      for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
        {
          tree op = gimple_op (stmt, i);
          enum vect_def_type dt;
          if (!op || op == lhs)
            continue;
          if (vect_is_simple_use (op, stmt_info->vinfo, &dt)
              && (dt == vect_constant_def || dt == vect_external_def))
            prologue_cost += vect_prologue_cost_for_slp_op (node, stmt_info,
                                                            i, dt, cost_vec);
        }
    }
  else
    /* Cost the "broadcast" of a scalar operand in to a vector operand.
       Use scalar_to_vec to cost the broadcast, as elsewhere in the vector
       cost model.  */
    for (int i = 0; i < ndts; i++)
      if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
        prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
                                           stmt_info, 0, vect_prologue);

  /* Adjust for two-operator SLP nodes.  */
  if (node && SLP_TREE_TWO_OPERATORS (node))
    {
      ncopies *= 2;
      inside_cost += record_stmt_cost (cost_vec, ncopies, vec_perm,
                                       stmt_info, 0, vect_body);
    }

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost += record_stmt_cost (cost_vec, ncopies, vector_stmt,
                                   stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr,
                                    stmt_vector_for_cost *cost_vec)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
        (i + 1) : i;
      inside_cost += record_stmt_cost (cost_vec, vect_pow2 (tmp),
                                       vec_promote_demote, stmt_info, 0,
                                       vect_body);
    }
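
  /* For instance, with PWR == 1 (a two-step demotion) the loop above records
     vect_pow2 (0) + vect_pow2 (1) == 3 vec_promote_demote operations, while
     the corresponding two-step promotion records 2 + 4 == 6, since each
     widening step doubles the number of vectors the next step operates on.  */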
  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
                                         stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

static void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       enum vect_def_type dt,
                       vect_memory_access_type memory_access_type,
                       vec_load_store_type vls_type, slp_tree slp_node,
                       stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  stmt_vec_info first_stmt_info = stmt_info;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (slp_node)
        prologue_cost += vect_prologue_cost_for_slp_op (slp_node, stmt_info,
                                                        1, dt, cost_vec);
      else
        prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
                                           stmt_info, 0, vect_prologue);
    }

  /* Grouped stores update all elements in the group at once,
     so we want the DR for the first statement.  */
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses a high and low interleave or shuffle operations for each
         needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
                                       ncopies * assumed_nunits,
                                       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (stmt_info, ncopies, &inside_cost, cost_vec);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      /* N scalar stores plus extracting the elements.  */
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
                                       ncopies * assumed_nunits,
                                       vec_to_scalar, stmt_info, 0, vect_body);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (stmt_vec_info stmt_info, int ncopies,
                     unsigned int *inside_cost,
                     stmt_vector_for_cost *body_cost_vec)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vector_store, stmt_info, 0,
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
        break;
      }

    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_store, stmt_info,
                                          DR_MISALIGNMENT (dr_info),
                                          vect_body);
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, one access has
   the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

static void
vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
                      vect_memory_access_type memory_access_type,
                      slp_instance instance,
                      slp_tree slp_node,
                      stmt_vector_for_cost *cost_vec)
{
  unsigned int inside_cost = 0, prologue_cost = 0;
  bool grouped_access_p = STMT_VINFO_GROUPED_ACCESS (stmt_info);

  gcc_assert (cost_vec);

  /* ??? Somehow we need to fix this at the callers.  */
  if (slp_node)
    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);

  if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
    {
      /* If the load is permuted then the alignment is determined by
         the first group element not by the first scalar stmt DR.  */
      stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      /* Record the cost for the permutation.  */
      unsigned n_perms;
      unsigned assumed_nunits
        = vect_nunits_for_cost (STMT_VINFO_VECTYPE (first_stmt_info));
      unsigned slp_vf = (ncopies * assumed_nunits) / instance->group_size;
      vect_transform_slp_perm_load (slp_node, vNULL, NULL,
                                    slp_vf, instance, true,
                                    &n_perms);
      inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
                                       first_stmt_info, 0, vect_body);
      /* And adjust the number of loads performed.  This handles
         redundancies as well as loads that are later dead.  */
      auto_sbitmap perm (DR_GROUP_SIZE (first_stmt_info));
      bitmap_clear (perm);
      for (unsigned i = 0;
           i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
        bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
      ncopies = 0;
      bool load_seen = false;
      for (unsigned i = 0; i < DR_GROUP_SIZE (first_stmt_info); ++i)
        {
          if (i % assumed_nunits == 0)
            {
              if (load_seen)
                ncopies++;
              load_seen = false;
            }
          if (bitmap_bit_p (perm, i))
            load_seen = true;
        }
      if (load_seen)
        ncopies++;
      gcc_assert (ncopies
                  <= (DR_GROUP_SIZE (first_stmt_info)
                      - DR_GROUP_GAP (first_stmt_info)
                      + assumed_nunits - 1) / assumed_nunits);
    }

  /* Grouped loads read all elements in the group at once,
     so we want the DR for the first statement.  */
  stmt_vec_info first_stmt_info = stmt_info;
  if (!slp_node && grouped_access_p)
    first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);

  /* True if we should include any once-per-group costs as well as
     the cost of the statement itself.  For SLP we only get called
     once per group anyhow.  */
  bool first_stmt_p = (first_stmt_info == stmt_info);

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of DR_GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (first_stmt_p
      && memory_access_type == VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Uses an even and odd extract operations or shuffle operations
         for each needed permute.  */
      int group_size = DR_GROUP_SIZE (first_stmt_info);
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
                                       stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }
  /* The loads themselves.  */
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_GATHER_SCATTER)
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      unsigned int assumed_nunits = vect_nunits_for_cost (vectype);
      inside_cost += record_stmt_cost (cost_vec,
                                       ncopies * assumed_nunits,
                                       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (stmt_info, ncopies, first_stmt_p,
                        &inside_cost, &prologue_cost,
                        cost_vec, cost_vec, true);
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    inside_cost += record_stmt_cost (cost_vec, ncopies, vec_construct,
                                     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (stmt_vec_info stmt_info, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *prologue_cost,
                    stmt_vector_for_cost *prologue_cost_vec,
                    stmt_vector_for_cost *body_cost_vec,
                    bool record_prologue_costs)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  int alignment_support_scheme
    = vect_supportable_dr_alignment (dr_info, false);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_load, stmt_info,
                                          DR_MISALIGNMENT (dr_info),
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
                                          vector_load, stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vec_perm, stmt_info, 0, vect_body);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           prologue costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
                                            stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide grouped
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost && record_prologue_costs)
          {
            *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
                                                vector_stmt, stmt_info,
                                                0, vect_prologue);
            if (targetm.vectorize.builtin_mask_for_load)
              *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
                                                  vector_stmt, stmt_info,
                                                  0, vect_prologue);
          }

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");

        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT_VINFO.  */

static void
vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
                    gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt_vinfo, new_stmt, gsi);
  else
    {
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
          basic_block new_bb;
          edge pe;

          if (nested_in_vect_loop_p (loop, stmt_vinfo))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
        }
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT_INFO.  */

tree
vect_init_vector (stmt_vec_info stmt_info, tree val, tree type,
                  gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  /* We abuse this function to push sth to a SSA name with initial 'val'.  */
  if (! useless_type_conversion_p (type, TREE_TYPE (val)))
    {
      gcc_assert (TREE_CODE (type) == VECTOR_TYPE);
      if (! types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
        {
          /* Scalar boolean value should be transformed into
             all zeros or all ones value before building a vector.  */
          if (VECTOR_BOOLEAN_TYPE_P (type))
            {
              tree true_val = build_all_ones_cst (TREE_TYPE (type));
              tree false_val = build_zero_cst (TREE_TYPE (type));

              if (CONSTANT_CLASS_P (val))
                val = integer_zerop (val) ? false_val : true_val;
              else
                {
                  new_temp = make_ssa_name (TREE_TYPE (type));
                  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
                                                   val, true_val, false_val);
                  vect_init_vector_1 (stmt_info, init_stmt, gsi);
                  val = new_temp;
                }
            }
          else if (CONSTANT_CLASS_P (val))
            val = fold_convert (TREE_TYPE (type), val);
          else
            {
              new_temp = make_ssa_name (TREE_TYPE (type));
              if (! INTEGRAL_TYPE_P (TREE_TYPE (val)))
                init_stmt = gimple_build_assign (new_temp,
                                                 fold_build1 (VIEW_CONVERT_EXPR,
                                                              TREE_TYPE (type),
                                                              val));
              else
                init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
              vect_init_vector_1 (stmt_info, init_stmt, gsi);
              val = new_temp;
            }
        }
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt_info, init_stmt, gsi);
  return new_temp;
}
/* Function vect_get_vec_def_for_operand_1.

   For a defining stmt DEF_STMT_INFO of a scalar stmt, return a vector def
   with type DT that will be used in the vectorized stmt.  */

tree
vect_get_vec_def_for_operand_1 (stmt_vec_info def_stmt_info,
                                enum vect_def_type dt)
{
  tree vec_oprnd;
  stmt_vec_info vec_stmt_info;

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      /* Code should use vect_get_vec_def_for_operand.  */
      gcc_unreachable ();

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
        /* Get the def from the vectorized stmt.  */
        vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
        /* Get vectorized pattern statement.  */
        if (!vec_stmt_info
            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
            && !STMT_VINFO_RELEVANT (def_stmt_info))
          vec_stmt_info = (STMT_VINFO_VEC_STMT
                           (STMT_VINFO_RELATED_STMT (def_stmt_info)));
        gcc_assert (vec_stmt_info);
        if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
          vec_oprnd = PHI_RESULT (phi);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
        return vec_oprnd;
      }

    /* operand is defined by a loop header phi.  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt_info->stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        vec_stmt_info = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gphi *phi = dyn_cast <gphi *> (vec_stmt_info->stmt))
          vec_oprnd = PHI_RESULT (phi);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt_info->stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT_VINFO.  This function returns a (vector) def
   that will be used in the vectorized stmt for STMT_VINFO.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, stmt_vec_info stmt_vinfo, tree vectype)
{
  gimple *def_stmt;
  enum vect_def_type dt;
  bool is_simple_use;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  stmt_vec_info def_stmt_info;
  is_simple_use = vect_is_simple_use (op, loop_vinfo, &dt,
                                      &def_stmt_info, &def_stmt);
  gcc_assert (is_simple_use);
  if (def_stmt && dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
    }

  if (dt == vect_constant_def || dt == vect_external_def)
    {
      tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
      tree vector_type;

      if (vectype)
        vector_type = vectype;
      else if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op))
               && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
        vector_type = build_same_sized_truth_vector_type (stmt_vectype);
      else
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

      gcc_assert (vector_type);
      return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.  VINFO describes the vectorization.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:          STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0: vx.0 = memref0     VS1.1
                        VS1.1: vx.1 = memref1     VS1.2
                        VS1.2: vx.2 = memref2     VS1.3
                        VS1.3: vx.3 = memref3

   S2: z = x + ...      VSnew.0: vz0 = vx.0 + ... VSnew.1
                        VSnew.1: vz1 = vx.1 + ... VSnew.2
                        VSnew.2: vz2 = vx.2 + ... VSnew.3
                        VSnew.3: vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (vinfo, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (vinfo, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (vinfo, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (vec_info *vinfo, tree vec_oprnd)
{
  stmt_vec_info def_stmt_info = vinfo->lookup_def (vec_oprnd);
  if (!def_stmt_info)
    /* Do nothing; can reuse same def.  */
    return vec_oprnd;

  def_stmt_info = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (def_stmt_info);
  if (gphi *phi = dyn_cast <gphi *> (def_stmt_info->stmt))
    vec_oprnd = PHI_RESULT (phi);
  else
    vec_oprnd = gimple_get_lhs (def_stmt_info->stmt);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

void
vect_get_vec_defs_for_stmt_copy (vec_info *vinfo,
                                 vec<tree> *vec_oprnds0,
                                 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.  */

void
vect_get_vec_defs (tree op0, tree op1, stmt_vec_info stmt_info,
                   vec<tree> *vec_oprnds0,
                   vec<tree> *vec_oprnds1,
                   slp_tree slp_node)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
        *vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt_info);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
        {
          vec_oprnds1->create (1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt_info);
          vec_oprnds1->quick_push (vec_oprnd);
        }
    }
}
/* Helper function called by vect_finish_replace_stmt and
   vect_finish_stmt_generation.  Set the location of the new
   statement and create and return a stmt_vec_info for it.  */

static stmt_vec_info
vect_finish_stmt_generation_1 (stmt_vec_info stmt_info, gimple *vec_stmt)
{
  vec_info *vinfo = stmt_info->vinfo;

  stmt_vec_info vec_stmt_info = vinfo->add_stmt (vec_stmt);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);

  return vec_stmt_info;
}
/* Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
   which sets the same scalar result as STMT_INFO did.  Create and return a
   stmt_vec_info for VEC_STMT.  */

stmt_vec_info
vect_finish_replace_stmt (stmt_vec_info stmt_info, gimple *vec_stmt)
{
  gcc_assert (gimple_get_lhs (stmt_info->stmt) == gimple_get_lhs (vec_stmt));

  gimple_stmt_iterator gsi = gsi_for_stmt (stmt_info->stmt);
  gsi_replace (&gsi, vec_stmt, false);

  return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
}
/* Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
   before *GSI.  Create and return a stmt_vec_info for VEC_STMT.  */

stmt_vec_info
vect_finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  gcc_assert (gimple_code (stmt_info->stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
        {
          tree vdef = gimple_vdef (at_stmt);
          gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
          /* If we have an SSA vuse and insert a store, update virtual
             SSA form to avoid triggering the renamer.  Do so only
             if we can easily see all uses - which is what almost always
             happens with the way vectorized stmts are inserted.  */
          if ((vdef && TREE_CODE (vdef) == SSA_NAME)
              && ((is_gimple_assign (vec_stmt)
                   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
                  || (is_gimple_call (vec_stmt)
                      && !(gimple_call_flags (vec_stmt)
                           & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
            {
              tree new_vdef = copy_ssa_name (vuse, vec_stmt);
              gimple_set_vdef (vec_stmt, new_vdef);
              SET_USE (gimple_vuse_op (at_stmt), new_vdef);
            }
        }
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
  return vect_finish_stmt_generation_1 (stmt_info, vec_stmt);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
                                tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
        {
          tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
          tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
          if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
                                              OPTIMIZE_FOR_SPEED))
            return ifn;
        }
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, stmt_vec_info,
                                  gimple_stmt_iterator *);
/* Check whether a load or store statement in the loop described by
   LOOP_VINFO is possible in a fully-masked loop.  This is testing
   whether the vectorizer pass has the appropriate support, as well as
   whether the target does.

   VLS_TYPE says whether the statement is a load or store and VECTYPE
   is the type of the vector being loaded or stored.  MEMORY_ACCESS_TYPE
   says how the load or store is going to be implemented and GROUP_SIZE
   is the number of load or store statements in the containing group.
   If the access is a gather load or scatter store, GS_INFO describes
   its arguments.

   Clear LOOP_VINFO_CAN_FULLY_MASK_P if a fully-masked loop is not
   supported, otherwise record the required mask types.  */

static void
check_load_store_masking (loop_vec_info loop_vinfo, tree vectype,
                          vec_load_store_type vls_type, int group_size,
                          vect_memory_access_type memory_access_type,
                          gather_scatter_info *gs_info)
{
  /* Invariant loads need no special support.  */
  if (memory_access_type == VMAT_INVARIANT)
    return;

  vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
  machine_mode vecmode = TYPE_MODE (vectype);
  bool is_load = (vls_type == VLS_LOAD);
  if (memory_access_type == VMAT_LOAD_STORE_LANES)
    {
      if (is_load
          ? !vect_load_lanes_supported (vectype, group_size, true)
          : !vect_store_lanes_supported (vectype, group_size, true))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "can't use a fully-masked loop because the"
                             " target doesn't have an appropriate masked"
                             " load/store-lanes instruction.\n");
          LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
          return;
        }
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      internal_fn ifn = (is_load
                         ? IFN_MASK_GATHER_LOAD
                         : IFN_MASK_SCATTER_STORE);
      tree offset_type = TREE_TYPE (gs_info->offset);
      if (!internal_gather_scatter_fn_supported_p (ifn, vectype,
                                                   gs_info->memory_type,
                                                   TYPE_SIGN (offset_type),
                                                   gs_info->scale))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "can't use a fully-masked loop because the"
                             " target doesn't have an appropriate masked"
                             " gather load or scatter store instruction.\n");
          LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
          return;
        }
      unsigned int ncopies = vect_get_num_copies (loop_vinfo, vectype);
      vect_record_loop_mask (loop_vinfo, masks, ncopies, vectype);
      return;
    }

  if (memory_access_type != VMAT_CONTIGUOUS
      && memory_access_type != VMAT_CONTIGUOUS_PERMUTE)
    {
      /* Element X of the data must come from iteration i * VF + X of the
         scalar loop.  We need more work to support other mappings.  */
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't use a fully-masked loop because an access"
                         " isn't contiguous.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }

  machine_mode mask_mode;
  if (!(targetm.vectorize.get_mask_mode
        (GET_MODE_NUNITS (vecmode),
         GET_MODE_SIZE (vecmode)).exists (&mask_mode))
      || !can_vec_mask_load_store_p (vecmode, mask_mode, is_load))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "can't use a fully-masked loop because the target"
                         " doesn't have the appropriate masked load or"
                         " store.\n");
      LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
      return;
    }
  /* We might load more scalars than we need for permuting SLP loads.
     We checked in get_group_load_store_type that the extra elements
     don't leak into a new vector.  */
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  unsigned int nvectors;
  if (can_div_away_from_zero_p (group_size * vf, nunits, &nvectors))
    vect_record_loop_mask (loop_vinfo, masks, nvectors, vectype);
  else
    gcc_unreachable ();
}
/* Return the mask input to a masked load or store.  VEC_MASK is the vectorized
   form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask
   that needs to be applied to all loads and stores in a vectorized loop.
   Return VEC_MASK if LOOP_MASK is null, otherwise return VEC_MASK & LOOP_MASK.

   MASK_TYPE is the type of both masks.  If new statements are needed,
   insert them before GSI.  */

static tree
prepare_load_store_mask (tree mask_type, tree loop_mask, tree vec_mask,
			 gimple_stmt_iterator *gsi)
{
  gcc_assert (useless_type_conversion_p (mask_type, TREE_TYPE (vec_mask)));
  if (!loop_mask)
    return vec_mask;

  gcc_assert (TREE_TYPE (loop_mask) == mask_type);
  tree and_res = make_temp_ssa_name (mask_type, NULL, "vec_mask_and");
  gimple *and_stmt = gimple_build_assign (and_res, BIT_AND_EXPR,
					  vec_mask, loop_mask);
  gsi_insert_before (gsi, and_stmt, GSI_SAME_STMT);
  return and_res;
}
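/* The statement built above is simply, e.g.:

     vec_mask_and_1 = vec_mask_2 & loop_mask_3;

   whose result then feeds the mask operand of a later IFN_MASK_LOAD,
   IFN_MASK_STORE or masked gather/scatter call.  (SSA version numbers
   are illustrative.)  */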
/* Determine whether we can use a gather load or scatter store to vectorize
   strided load or store STMT_INFO by truncating the current offset to a
   smaller width.  We need to be able to construct an offset vector:

     { 0, X, X*2, X*3, ... }

   without loss of precision, where X is STMT_INFO's DR_STEP.

   Return true if this is possible, describing the gather load or scatter
   store in GS_INFO.  MASKED_P is true if the load or store is conditional.  */

static bool
vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
				     loop_vec_info loop_vinfo, bool masked_p,
				     gather_scatter_info *gs_info)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  data_reference *dr = dr_info->dr;
  tree step = DR_STEP (dr);
  if (TREE_CODE (step) != INTEGER_CST)
    {
      /* ??? Perhaps we could use range information here?  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "cannot truncate variable step.\n");
      return false;
    }

  /* Get the number of bits in an element.  */
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  scalar_mode element_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);

  /* Set COUNT to the upper limit on the number of elements - 1.
     Start with the maximum vectorization factor.  */
  unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;

  /* Try lowering COUNT to the number of scalar latch iterations.  */
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  widest_int max_iters;
  if (max_loop_iterations (loop, &max_iters)
      && max_iters < count)
    count = max_iters.to_shwi ();

  /* Try scales of 1 and the element size.  */
  int scales[] = { 1, vect_get_scalar_dr_size (dr_info) };
  wi::overflow_type overflow = wi::OVF_NONE;
  for (int i = 0; i < 2; ++i)
    {
      int scale = scales[i];
      widest_int factor;
      if (!wi::multiple_of_p (wi::to_widest (step), scale, SIGNED, &factor))
	continue;

      /* See whether we can calculate (COUNT - 1) * STEP / SCALE
	 in OFFSET_BITS bits.  */
      widest_int range = wi::mul (count, factor, SIGNED, &overflow);
      if (overflow)
	continue;
      signop sign = range >= 0 ? UNSIGNED : SIGNED;
      if (wi::min_precision (range, sign) > element_bits)
	{
	  overflow = wi::OVF_UNKNOWN;
	  continue;
	}

      /* See whether the target supports the operation.  */
      tree memory_type = TREE_TYPE (DR_REF (dr));
      if (!vect_gather_scatter_fn_p (DR_IS_READ (dr), masked_p, vectype,
				     memory_type, element_bits, sign, scale,
				     &gs_info->ifn, &gs_info->element_type))
	continue;

      tree offset_type = build_nonstandard_integer_type (element_bits,
							  sign == UNSIGNED);

      gs_info->decl = NULL_TREE;
      /* Logically the sum of DR_BASE_ADDRESS, DR_INIT and DR_OFFSET,
	 but we don't need to store that here.  */
      gs_info->base = NULL_TREE;
      gs_info->offset = fold_convert (offset_type, step);
      gs_info->offset_dt = vect_constant_def;
      gs_info->offset_vectype = NULL_TREE;
      gs_info->scale = scale;
      gs_info->memory_type = memory_type;
      return true;
    }

  if (overflow && dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "truncating gather/scatter offset to %d bits"
		     " might change its value.\n", element_bits);

  return false;
}
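/* As an illustrative example: with DR_STEP == 8 and 32-bit elements,
   trying SCALE == vect_get_scalar_dr_size, i.e. 4, gives X == 8 / 4 == 2,
   so the offsets needed later are { 0, 2, 4, 6, ... } with a gather/scatter
   scale of 4; as long as COUNT * 2 still fits in the 32 element bits,
   the truncated offsets cannot wrap and the gather or scatter form is
   usable.  (Figures chosen for exposition only.)  */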
/* Return true if we can use gather/scatter internal functions to
   vectorize STMT_INFO, which is a grouped or strided load or store.
   MASKED_P is true if load or store is conditional.  When returning
   true, fill in GS_INFO with the information required to perform the
   operation.  */

static bool
vect_use_strided_gather_scatters_p (stmt_vec_info stmt_info,
				    loop_vec_info loop_vinfo, bool masked_p,
				    gather_scatter_info *gs_info)
{
  if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info)
      || gs_info->decl)
    return vect_truncate_gather_scatter_offset (stmt_info, loop_vinfo,
						masked_p, gs_info);

  scalar_mode element_mode = SCALAR_TYPE_MODE (gs_info->element_type);
  unsigned int element_bits = GET_MODE_BITSIZE (element_mode);
  tree offset_type = TREE_TYPE (gs_info->offset);
  unsigned int offset_bits = TYPE_PRECISION (offset_type);

  /* Enforced by vect_check_gather_scatter.  */
  gcc_assert (element_bits >= offset_bits);

  /* If the elements are wider than the offset, convert the offset to the
     same width, without changing its sign.  */
  if (element_bits > offset_bits)
    {
      bool unsigned_p = TYPE_UNSIGNED (offset_type);
      offset_type = build_nonstandard_integer_type (element_bits, unsigned_p);
      gs_info->offset = fold_convert (offset_type, gs_info->offset);
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "using gather/scatter for strided/grouped access,"
		     " scale = %d\n", gs_info->scale);

  return true;
}
/* STMT_INFO is a non-strided load or store, meaning that it accesses
   elements with a known constant step.  Return -1 if that step
   is negative, 0 if it is zero, and 1 if it is greater than zero.  */

static int
compare_step_with_zero (stmt_vec_info stmt_info)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  return tree_int_cst_compare (vect_dr_behavior (dr_info)->step,
			       size_zero_node);
}
/* If the target supports a permute mask that reverses the elements in
   a vector of type VECTYPE, return that mask, otherwise return null.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* The encoding has a single stepped pattern.  */
  vec_perm_builder sel (nunits, 1, 3);
  for (int i = 0; i < 3; ++i)
    sel.quick_push (nunits - 1 - i);

  vec_perm_indices indices (sel, 1, nunits);
  if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, indices);
}
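/* For a vector with eight elements the three pushed indices are
   { 7, 6, 5 }; because the encoding uses a single stepped pattern,
   vec_perm_indices extends this to the full reversal
   { 7, 6, 5, 4, 3, 2, 1, 0 }, and the same encoding also describes
   the reversal of variable-length vectors.  */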
/* STMT_INFO is either a masked or unconditional store.  Return the value
   being stored.  */

tree
vect_get_store_rhs (stmt_vec_info stmt_info)
{
  if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
    {
      gcc_assert (gimple_assign_single_p (assign));
      return gimple_assign_rhs1 (assign);
    }
  if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
    {
      internal_fn ifn = gimple_call_internal_fn (call);
      int index = internal_fn_stored_value_index (ifn);
      gcc_assert (index >= 0);
      return gimple_call_arg (call, index);
    }
  gcc_unreachable ();
}
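/* The two forms handled above are, for example:

     *p_4 = x_5;                             (a gassign; the value is rhs1)
     .MASK_STORE (p_4, align, mask_6, x_5);  (a gcall; the stored value's
					      operand index comes from
					      internal_fn_stored_value_index)

   SSA names here are illustrative.  */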
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT_INFO is part of a grouped load
   or store.

   For stores, the statements in the group are all consecutive
   and there is no gap at the end.  For loads, the statements in the
   group might not be consecutive; there can be gaps between statements
   as well as at the end.  */

static bool
get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
			   bool masked_p, vec_load_store_type vls_type,
			   vect_memory_access_type *memory_access_type,
			   gather_scatter_info *gs_info)
{
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
  dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
  unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
  bool single_element_p = (stmt_info == first_stmt_info
			   && !DR_GROUP_NEXT_ELEMENT (stmt_info));
  unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (first_stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* True if the vectorized statements would access beyond the last
     statement in the group.  */
  bool overrun_p = false;

  /* True if we can cope with such overrun by peeling for gaps, so that
     there is at least one final scalar iteration after the vector loop.  */
  bool can_overrun_p = (!masked_p
			&& vls_type == VLS_LOAD
			&& loop_vinfo
			&& !loop->inner);

  /* There can only be a gap at the end of the group if the stride is
     known at compile time.  */
  gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0);

  /* Stores can't yet have gaps.  */
  gcc_assert (slp || vls_type == VLS_LOAD || gap == 0);
2201 if (STMT_VINFO_STRIDED_P (first_stmt_info
))
2203 /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
2204 separated by the stride, until we have a complete vector.
2205 Fall back to scalar accesses if that isn't possible. */
2206 if (multiple_p (nunits
, group_size
))
2207 *memory_access_type
= VMAT_STRIDED_SLP
;
2209 *memory_access_type
= VMAT_ELEMENTWISE
;
2213 overrun_p
= loop_vinfo
&& gap
!= 0;
2214 if (overrun_p
&& vls_type
!= VLS_LOAD
)
2216 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2217 "Grouped store with gaps requires"
2218 " non-consecutive accesses\n");
2221 /* An overrun is fine if the trailing elements are smaller
2222 than the alignment boundary B. Every vector access will
2223 be a multiple of B and so we are guaranteed to access a
2224 non-gap element in the same B-sized block. */
2226 && gap
< (vect_known_alignment_in_bytes (first_dr_info
)
2227 / vect_get_scalar_dr_size (first_dr_info
)))
2229 if (overrun_p
&& !can_overrun_p
)
2231 if (dump_enabled_p ())
2232 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
2233 "Peeling for outer loop is not supported\n");
2236 *memory_access_type
= VMAT_CONTIGUOUS
;
2241 /* We can always handle this case using elementwise accesses,
2242 but see if something more efficient is available. */
2243 *memory_access_type
= VMAT_ELEMENTWISE
;
2245 /* If there is a gap at the end of the group then these optimizations
2246 would access excess elements in the last iteration. */
2247 bool would_overrun_p
= (gap
!= 0);
2248 /* An overrun is fine if the trailing elements are smaller than the
2249 alignment boundary B. Every vector access will be a multiple of B
2250 and so we are guaranteed to access a non-gap element in the
2251 same B-sized block. */
2254 && gap
< (vect_known_alignment_in_bytes (first_dr_info
)
2255 / vect_get_scalar_dr_size (first_dr_info
)))
2256 would_overrun_p
= false;
2258 if (!STMT_VINFO_STRIDED_P (first_stmt_info
)
2259 && (can_overrun_p
|| !would_overrun_p
)
2260 && compare_step_with_zero (stmt_info
) > 0)
2262 /* First cope with the degenerate case of a single-element
2264 if (known_eq (TYPE_VECTOR_SUBPARTS (vectype
), 1U))
2265 *memory_access_type
= VMAT_CONTIGUOUS
;
2267 /* Otherwise try using LOAD/STORE_LANES. */
2268 if (*memory_access_type
== VMAT_ELEMENTWISE
2269 && (vls_type
== VLS_LOAD
2270 ? vect_load_lanes_supported (vectype
, group_size
, masked_p
)
2271 : vect_store_lanes_supported (vectype
, group_size
,
2274 *memory_access_type
= VMAT_LOAD_STORE_LANES
;
2275 overrun_p
= would_overrun_p
;
2278 /* If that fails, try using permuting loads. */
2279 if (*memory_access_type
== VMAT_ELEMENTWISE
2280 && (vls_type
== VLS_LOAD
2281 ? vect_grouped_load_supported (vectype
, single_element_p
,
2283 : vect_grouped_store_supported (vectype
, group_size
)))
2285 *memory_access_type
= VMAT_CONTIGUOUS_PERMUTE
;
2286 overrun_p
= would_overrun_p
;
2290 /* As a last resort, trying using a gather load or scatter store.
2292 ??? Although the code can handle all group sizes correctly,
2293 it probably isn't a win to use separate strided accesses based
2294 on nearby locations. Or, even if it's a win over scalar code,
2295 it might not be a win over vectorizing at a lower VF, if that
2296 allows us to use contiguous accesses. */
2297 if (*memory_access_type
== VMAT_ELEMENTWISE
2300 && vect_use_strided_gather_scatters_p (stmt_info
, loop_vinfo
,
2302 *memory_access_type
= VMAT_GATHER_SCATTER
;
  if (vls_type != VLS_LOAD && first_stmt_info == stmt_info)
    {
      /* STMT is the leader of the group.  Check the operands of all the
	 stmts of the group.  */
      stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info);
      while (next_stmt_info)
	{
	  tree op = vect_get_store_rhs (next_stmt_info);
	  enum vect_def_type dt;
	  if (!vect_is_simple_use (op, vinfo, &dt))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "use not simple.\n");
	      return false;
	    }
	  next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
	}
    }

  if (overrun_p)
    {
      gcc_assert (can_overrun_p);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Data access with gaps requires scalar "
			 "epilogue loop\n");
      LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
    }

  return true;
}
/* A subroutine of get_load_store_type, with a subset of the same
   arguments.  Handle the case where STMT_INFO is a load or store that
   accesses consecutive elements with a negative step.  */

static vect_memory_access_type
get_negative_load_store_type (stmt_vec_info stmt_info, tree vectype,
			      vec_load_store_type vls_type,
			      unsigned int ncopies)
{
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
  dr_alignment_support alignment_support_scheme;

  if (ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types with negative step.\n");
      return VMAT_ELEMENTWISE;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (dr_info, false);
  if (alignment_support_scheme != dr_aligned
      && alignment_support_scheme != dr_unaligned_supported)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step but alignment required.\n");
      return VMAT_ELEMENTWISE;
    }

  if (vls_type == VLS_STORE_INVARIANT)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "negative step with invariant source;"
			 " no permute needed.\n");
      return VMAT_CONTIGUOUS_DOWN;
    }

  if (!perm_mask_for_reverse (vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "negative step and reversing not supported.\n");
      return VMAT_ELEMENTWISE;
    }

  return VMAT_CONTIGUOUS_REVERSE;
}
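/* For example, a load with DR_STEP == -4 on 4-byte elements can be done
   as one contiguous vector load of the block ending at the original
   address, followed by the element reversal provided by
   perm_mask_for_reverse; that combination is what VMAT_CONTIGUOUS_REVERSE
   denotes.  (Illustrative step value.)  */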
/* Analyze load or store statement STMT_INFO of type VLS_TYPE.  Return true
   if there is a memory access type that the vectorized form can use,
   storing it in *MEMORY_ACCESS_TYPE if so.  If we decide to use gathers
   or scatters, fill in GS_INFO accordingly.

   SLP says whether we're performing SLP rather than loop vectorization.
   MASKED_P is true if the statement is conditional on a vectorized mask.
   VECTYPE is the vector type that the vectorized statements will use.
   NCOPIES is the number of vector statements that will be needed.  */

static bool
get_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
		     bool masked_p, vec_load_store_type vls_type,
		     unsigned int ncopies,
		     vect_memory_access_type *memory_access_type,
		     gather_scatter_info *gs_info)
{
  vec_info *vinfo = stmt_info->vinfo;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
      if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
	gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo,
				    &gs_info->offset_dt,
				    &gs_info->offset_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "%s index use not simple.\n",
			     vls_type == VLS_LOAD ? "gather" : "scatter");
	  return false;
	}
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
				      vls_type, memory_access_type, gs_info))
	return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      if (loop_vinfo
	  && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
						 masked_p, gs_info))
	*memory_access_type = VMAT_GATHER_SCATTER;
      else
	*memory_access_type = VMAT_ELEMENTWISE;
    }
  else
    {
      int cmp = compare_step_with_zero (stmt_info);
      if (cmp < 0)
	*memory_access_type = get_negative_load_store_type
	  (stmt_info, vectype, vls_type, ncopies);
      else if (cmp == 0)
	{
	  gcc_assert (vls_type == VLS_LOAD);
	  *memory_access_type = VMAT_INVARIANT;
	}
      else
	*memory_access_type = VMAT_CONTIGUOUS;
    }

  if ((*memory_access_type == VMAT_ELEMENTWISE
       || *memory_access_type == VMAT_STRIDED_SLP)
      && !nunits.is_constant ())
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Not using elementwise accesses due to variable "
			 "vectorization factor.\n");
      return false;
    }

  /* FIXME: At the moment the cost model seems to underestimate the
     cost of using elementwise accesses.  This check preserves the
     traditional behavior until that can be fixed.  */
  stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
  if (!first_stmt_info)
    first_stmt_info = stmt_info;
  if (*memory_access_type == VMAT_ELEMENTWISE
      && !STMT_VINFO_STRIDED_P (first_stmt_info)
      && !(stmt_info == DR_GROUP_FIRST_ELEMENT (stmt_info)
	   && !DR_GROUP_NEXT_ELEMENT (stmt_info)
	   && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not falling back to elementwise accesses\n");
      return false;
    }
  return true;
}
/* Return true if boolean argument MASK is suitable for vectorizing
   conditional load or store STMT_INFO.  When returning true, store the type
   of the definition in *MASK_DT_OUT and the type of the vectorized mask
   in *MASK_VECTYPE_OUT.  */

static bool
vect_check_load_store_mask (stmt_vec_info stmt_info, tree mask,
			    vect_def_type *mask_dt_out,
			    tree *mask_vectype_out)
{
  if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (mask)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not a boolean.\n");
      return false;
    }

  if (TREE_CODE (mask) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask argument is not an SSA name.\n");
      return false;
    }

  enum vect_def_type mask_dt;
  tree mask_vectype;
  if (!vect_is_simple_use (mask, stmt_info->vinfo, &mask_dt, &mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "mask use not simple.\n");
      return false;
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "could not find an appropriate vector mask type.\n");
      return false;
    }

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
		TYPE_VECTOR_SUBPARTS (vectype)))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vector mask type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION,
		       " does not match vector data type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION, ".\n");
	}
      return false;
    }

  *mask_dt_out = mask_dt;
  *mask_vectype_out = mask_vectype;
  return true;
}
/* Return true if stored value RHS is suitable for vectorizing store
   statement STMT_INFO.  When returning true, store the type of the
   definition in *RHS_DT_OUT, the type of the vectorized store value in
   *RHS_VECTYPE_OUT and the type of the store in *VLS_TYPE_OUT.  */

static bool
vect_check_store_rhs (stmt_vec_info stmt_info, tree rhs,
		      vect_def_type *rhs_dt_out, tree *rhs_vectype_out,
		      vec_load_store_type *vls_type_out)
{
  /* In the case this is a store from a constant make sure
     native_encode_expr can handle it.  */
  if (CONSTANT_CLASS_P (rhs) && native_encode_expr (rhs, NULL, 64) == 0)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cannot encode constant as a byte sequence.\n");
      return false;
    }

  enum vect_def_type rhs_dt;
  tree rhs_vectype;
  if (!vect_is_simple_use (rhs, stmt_info->vinfo, &rhs_dt, &rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "incompatible vector types.\n");
      return false;
    }

  *rhs_dt_out = rhs_dt;
  *rhs_vectype_out = rhs_vectype;
  if (rhs_dt == vect_constant_def || rhs_dt == vect_external_def)
    *vls_type_out = VLS_STORE_INVARIANT;
  else
    *vls_type_out = VLS_STORE;
  return true;
}
/* Build an all-ones vector mask of type MASKTYPE while vectorizing STMT_INFO.
   Note that we support masks with floating-point type, in which case the
   floats are interpreted as a bitmask.  */

static tree
vect_build_all_ones_mask (stmt_vec_info stmt_info, tree masktype)
{
  if (TREE_CODE (masktype) == INTEGER_TYPE)
    return build_int_cst (masktype, -1);
  else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
    {
      tree mask = build_int_cst (TREE_TYPE (masktype), -1);
      mask = build_vector_from_val (masktype, mask);
      return vect_init_vector (stmt_info, mask, masktype, NULL);
    }
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
    {
      REAL_VALUE_TYPE r;
      long tmp[6];
      for (int j = 0; j < 6; ++j)
	tmp[j] = -1;
      real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
      tree mask = build_real (TREE_TYPE (masktype), r);
      mask = build_vector_from_val (masktype, mask);
      return vect_init_vector (stmt_info, mask, masktype, NULL);
    }
  gcc_unreachable ();
}
/* Build an all-zero merge value of type VECTYPE while vectorizing
   STMT_INFO as a gather load.  */

static tree
vect_build_zero_merge_argument (stmt_vec_info stmt_info, tree vectype)
{
  tree merge;
  if (TREE_CODE (TREE_TYPE (vectype)) == INTEGER_TYPE)
    merge = build_int_cst (TREE_TYPE (vectype), 0);
  else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (vectype)))
    {
      REAL_VALUE_TYPE r;
      long tmp[6];
      for (int j = 0; j < 6; ++j)
	tmp[j] = 0;
      real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (vectype)));
      merge = build_real (TREE_TYPE (vectype), r);
    }
  else
    gcc_unreachable ();
  merge = build_vector_from_val (vectype, merge);
  return vect_init_vector (stmt_info, merge, vectype, NULL);
}
/* Build a gather load call while vectorizing STMT_INFO.  Insert new
   instructions before GSI and add them to VEC_STMT.  GS_INFO describes
   the gather load operation.  If the load is conditional, MASK is the
   unvectorized condition and MASK_DT is its definition type, otherwise
   MASK is null.  */
2663 vect_build_gather_load_calls (stmt_vec_info stmt_info
,
2664 gimple_stmt_iterator
*gsi
,
2665 stmt_vec_info
*vec_stmt
,
2666 gather_scatter_info
*gs_info
,
2669 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2670 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
2671 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2672 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
2673 int ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
2674 edge pe
= loop_preheader_edge (loop
);
2675 enum { NARROW
, NONE
, WIDEN
} modifier
;
2676 poly_uint64 gather_off_nunits
2677 = TYPE_VECTOR_SUBPARTS (gs_info
->offset_vectype
);
2679 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gs_info
->decl
));
2680 tree rettype
= TREE_TYPE (TREE_TYPE (gs_info
->decl
));
2681 tree srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2682 tree ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2683 tree idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2684 tree masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
2685 tree scaletype
= TREE_VALUE (arglist
);
2686 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
2687 && (!mask
|| types_compatible_p (srctype
, masktype
)));
2689 tree perm_mask
= NULL_TREE
;
2690 tree mask_perm_mask
= NULL_TREE
;
2691 if (known_eq (nunits
, gather_off_nunits
))
2693 else if (known_eq (nunits
* 2, gather_off_nunits
))
2697 /* Currently widening gathers and scatters are only supported for
2698 fixed-length vectors. */
2699 int count
= gather_off_nunits
.to_constant ();
2700 vec_perm_builder
sel (count
, count
, 1);
2701 for (int i
= 0; i
< count
; ++i
)
2702 sel
.quick_push (i
| (count
/ 2));
2704 vec_perm_indices
indices (sel
, 1, count
);
2705 perm_mask
= vect_gen_perm_mask_checked (gs_info
->offset_vectype
,
2708 else if (known_eq (nunits
, gather_off_nunits
* 2))
2712 /* Currently narrowing gathers and scatters are only supported for
2713 fixed-length vectors. */
2714 int count
= nunits
.to_constant ();
2715 vec_perm_builder
sel (count
, count
, 1);
2716 sel
.quick_grow (count
);
2717 for (int i
= 0; i
< count
; ++i
)
2718 sel
[i
] = i
< count
/ 2 ? i
: i
+ count
/ 2;
2719 vec_perm_indices
indices (sel
, 2, count
);
2720 perm_mask
= vect_gen_perm_mask_checked (vectype
, indices
);
2726 for (int i
= 0; i
< count
; ++i
)
2727 sel
[i
] = i
| (count
/ 2);
2728 indices
.new_vector (sel
, 2, count
);
2729 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, indices
);
2735 tree scalar_dest
= gimple_get_lhs (stmt_info
->stmt
);
2736 tree vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
2738 tree ptr
= fold_convert (ptrtype
, gs_info
->base
);
2739 if (!is_gimple_min_invariant (ptr
))
2742 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
2743 basic_block new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
2744 gcc_assert (!new_bb
);
2747 tree scale
= build_int_cst (scaletype
, gs_info
->scale
);
2749 tree vec_oprnd0
= NULL_TREE
;
2750 tree vec_mask
= NULL_TREE
;
2751 tree src_op
= NULL_TREE
;
2752 tree mask_op
= NULL_TREE
;
2753 tree prev_res
= NULL_TREE
;
2754 stmt_vec_info prev_stmt_info
= NULL
;
2758 src_op
= vect_build_zero_merge_argument (stmt_info
, rettype
);
2759 mask_op
= vect_build_all_ones_mask (stmt_info
, masktype
);
2762 for (int j
= 0; j
< ncopies
; ++j
)
2765 if (modifier
== WIDEN
&& (j
& 1))
2766 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
2767 perm_mask
, stmt_info
, gsi
);
2770 = vect_get_vec_def_for_operand (gs_info
->offset
, stmt_info
);
2772 op
= vec_oprnd0
= vect_get_vec_def_for_stmt_copy (loop_vinfo
,
2775 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
2777 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
)),
2778 TYPE_VECTOR_SUBPARTS (idxtype
)));
2779 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
2780 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
2781 gassign
*new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2782 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
2788 if (mask_perm_mask
&& (j
& 1))
2789 mask_op
= permute_vec_elements (mask_op
, mask_op
,
2790 mask_perm_mask
, stmt_info
, gsi
);
2794 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt_info
);
2796 vec_mask
= vect_get_vec_def_for_stmt_copy (loop_vinfo
,
2800 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
2803 (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
)),
2804 TYPE_VECTOR_SUBPARTS (masktype
)));
2805 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
2806 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
2808 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
2809 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
2816 gcall
*new_call
= gimple_build_call (gs_info
->decl
, 5, src_op
, ptr
, op
,
2819 stmt_vec_info new_stmt_info
;
2820 if (!useless_type_conversion_p (vectype
, rettype
))
2822 gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype
),
2823 TYPE_VECTOR_SUBPARTS (rettype
)));
2824 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
2825 gimple_call_set_lhs (new_call
, op
);
2826 vect_finish_stmt_generation (stmt_info
, new_call
, gsi
);
2827 var
= make_ssa_name (vec_dest
);
2828 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
2829 gassign
*new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
2831 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
2835 var
= make_ssa_name (vec_dest
, new_call
);
2836 gimple_call_set_lhs (new_call
, var
);
2838 = vect_finish_stmt_generation (stmt_info
, new_call
, gsi
);
2841 if (modifier
== NARROW
)
2848 var
= permute_vec_elements (prev_res
, var
, perm_mask
,
2850 new_stmt_info
= loop_vinfo
->lookup_def (var
);
2853 if (prev_stmt_info
== NULL
)
2854 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
2856 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
2857 prev_stmt_info
= new_stmt_info
;
/* Prepare the base and offset in GS_INFO for vectorization.
   Set *DATAREF_PTR to the loop-invariant base address and *VEC_OFFSET
   to the vectorized offset argument for the first copy of STMT_INFO.
   STMT_INFO is the statement described by GS_INFO and LOOP is the
   containing loop.  */

static void
vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info,
			     gather_scatter_info *gs_info,
			     tree *dataref_ptr, tree *vec_offset)
{
  gimple_seq stmts = NULL;
  *dataref_ptr = force_gimple_operand (gs_info->base, &stmts, true, NULL_TREE);
  if (stmts != NULL)
    {
      basic_block new_bb;
      edge pe = loop_preheader_edge (loop);
      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }
  tree offset_type = TREE_TYPE (gs_info->offset);
  tree offset_vectype = get_vectype_for_scalar_type (offset_type);
  *vec_offset = vect_get_vec_def_for_operand (gs_info->offset, stmt_info,
					      offset_vectype);
}
/* Prepare to implement a grouped or strided load or store using
   the gather load or scatter store operation described by GS_INFO.
   STMT_INFO is the load or store statement.

   Set *DATAREF_BUMP to the amount that should be added to the base
   address after each copy of the vectorized statement.  Set *VEC_OFFSET
   to an invariant offset vector in which element I has the value
   I * DR_STEP / SCALE.  */

static void
vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
				 loop_vec_info loop_vinfo,
				 gather_scatter_info *gs_info,
				 tree *dataref_bump, tree *vec_offset)
{
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  gimple_seq stmts;

  tree bump = size_binop (MULT_EXPR,
			  fold_convert (sizetype, DR_STEP (dr)),
			  size_int (TYPE_VECTOR_SUBPARTS (vectype)));
  *dataref_bump = force_gimple_operand (bump, &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  /* The offset given in GS_INFO can have pointer type, so use the element
     type of the vector instead.  */
  tree offset_type = TREE_TYPE (gs_info->offset);
  tree offset_vectype = get_vectype_for_scalar_type (offset_type);
  offset_type = TREE_TYPE (offset_vectype);

  /* Calculate X = DR_STEP / SCALE and convert it to the appropriate type.  */
  tree step = size_binop (EXACT_DIV_EXPR, DR_STEP (dr),
			  ssize_int (gs_info->scale));
  step = fold_convert (offset_type, step);
  step = force_gimple_operand (step, &stmts, true, NULL_TREE);

  /* Create {0, X, X*2, X*3, ...}.  */
  *vec_offset = gimple_build (&stmts, VEC_SERIES_EXPR, offset_vectype,
			      build_zero_cst (offset_type), step);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
}
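/* Illustrative values: with DR_STEP == 8, gs_info->scale == 8 and a
   4-element vector, DATAREF_BUMP becomes 8 * 4 == 32 bytes per copy,
   X == 8 / 8 == 1, and VEC_OFFSET is the VEC_SERIES constant
   { 0, 1, 2, 3 }; the gather or scatter then applies the scale of 8 to
   each offset.  */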
/* Return the amount that should be added to a vector pointer to move
   to the next or previous copy of AGGR_TYPE.  DR_INFO is the data reference
   being vectorized and MEMORY_ACCESS_TYPE describes the type of
   vectorization.  */

static tree
vect_get_data_ptr_increment (dr_vec_info *dr_info, tree aggr_type,
			     vect_memory_access_type memory_access_type)
{
  if (memory_access_type == VMAT_INVARIANT)
    return size_zero_node;

  tree iv_step = TYPE_SIZE_UNIT (aggr_type);
  tree step = vect_dr_behavior (dr_info)->step;
  if (tree_int_cst_sgn (step) == -1)
    iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
  return iv_step;
}
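/* E.g. if AGGR_TYPE is a 16-byte vector type the increment is 16 bytes,
   and for a data reference with a negative step it is -16 so that the
   pointer moves backwards through memory.  */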
/* Check and perform vectorization of BUILT_IN_BSWAP{16,32,64}.  */
2955 vectorizable_bswap (stmt_vec_info stmt_info
, gimple_stmt_iterator
*gsi
,
2956 stmt_vec_info
*vec_stmt
, slp_tree slp_node
,
2957 tree vectype_in
, stmt_vector_for_cost
*cost_vec
)
2960 gcall
*stmt
= as_a
<gcall
*> (stmt_info
->stmt
);
2961 vec_info
*vinfo
= stmt_info
->vinfo
;
2962 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
2964 unsigned HOST_WIDE_INT nunits
, num_bytes
;
2966 op
= gimple_call_arg (stmt
, 0);
2967 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
2969 if (!TYPE_VECTOR_SUBPARTS (vectype
).is_constant (&nunits
))
2972 /* Multiple types in SLP are handled by creating the appropriate number of
2973 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
2978 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
2980 gcc_assert (ncopies
>= 1);
2982 tree char_vectype
= get_same_sized_vectype (char_type_node
, vectype_in
);
2986 if (!TYPE_VECTOR_SUBPARTS (char_vectype
).is_constant (&num_bytes
))
2989 unsigned word_bytes
= num_bytes
/ nunits
;
2991 /* The encoding uses one stepped pattern for each byte in the word. */
2992 vec_perm_builder
elts (num_bytes
, word_bytes
, 3);
2993 for (unsigned i
= 0; i
< 3; ++i
)
2994 for (unsigned j
= 0; j
< word_bytes
; ++j
)
2995 elts
.quick_push ((i
+ 1) * word_bytes
- j
- 1);
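	  /* For instance, for a 32-bit bswap on a 16-byte char vector the
	     selector encoded by the loops above expands to
	     { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 },
	     i.e. each 4-byte word has its bytes reversed in place.
	     (Illustrative sizes.)  */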
2997 vec_perm_indices
indices (elts
, 1, num_bytes
);
2998 if (!can_vec_perm_const_p (TYPE_MODE (char_vectype
), indices
))
3003 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
3004 DUMP_VECT_SCOPE ("vectorizable_bswap");
3007 record_stmt_cost (cost_vec
,
3008 1, vector_stmt
, stmt_info
, 0, vect_prologue
);
3009 record_stmt_cost (cost_vec
,
3010 ncopies
, vec_perm
, stmt_info
, 0, vect_body
);
3015 tree bswap_vconst
= vec_perm_indices_to_tree (char_vectype
, indices
);
3018 vec
<tree
> vec_oprnds
= vNULL
;
3019 stmt_vec_info new_stmt_info
= NULL
;
3020 stmt_vec_info prev_stmt_info
= NULL
;
3021 for (unsigned j
= 0; j
< ncopies
; j
++)
3025 vect_get_vec_defs (op
, NULL
, stmt_info
, &vec_oprnds
, NULL
, slp_node
);
3027 vect_get_vec_defs_for_stmt_copy (vinfo
, &vec_oprnds
, NULL
);
3029 /* Arguments are ready. create the new vector stmt. */
3032 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
3035 tree tem
= make_ssa_name (char_vectype
);
3036 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
3037 char_vectype
, vop
));
3038 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
3039 tree tem2
= make_ssa_name (char_vectype
);
3040 new_stmt
= gimple_build_assign (tem2
, VEC_PERM_EXPR
,
3041 tem
, tem
, bswap_vconst
);
3042 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
3043 tem
= make_ssa_name (vectype
);
3044 new_stmt
= gimple_build_assign (tem
, build1 (VIEW_CONVERT_EXPR
,
3047 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
3049 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
3056 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
3058 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
3060 prev_stmt_info
= new_stmt_info
;
3063 vec_oprnds
.release ();
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
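/* For example, packing V4SI inputs into a V8HI result is typically a
   single VEC_PACK_TRUNC_EXPR step, in which case *CONVERT_CODE is set to
   that pack code; a narrowing that would need intermediate types is
   rejected.  (Target support for the pack operation is assumed in this
   example.)  */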
/* Function vectorizable_call.

   Check if STMT_INFO performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_call (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		   stmt_vec_info *vec_stmt, slp_tree slp_node,
		   stmt_vector_for_cost *cost_vec)
{
3109 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
3110 stmt_vec_info prev_stmt_info
;
3111 tree vectype_out
, vectype_in
;
3112 poly_uint64 nunits_in
;
3113 poly_uint64 nunits_out
;
3114 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3115 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3116 vec_info
*vinfo
= stmt_info
->vinfo
;
3117 tree fndecl
, new_temp
, rhs_type
;
3118 enum vect_def_type dt
[4]
3119 = { vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
,
3120 vect_unknown_def_type
};
3121 int ndts
= ARRAY_SIZE (dt
);
3123 auto_vec
<tree
, 8> vargs
;
3124 auto_vec
<tree
, 8> orig_vargs
;
3125 enum { NARROW
, NONE
, WIDEN
} modifier
;
3129 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3132 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3136 /* Is STMT_INFO a vectorizable call? */
3137 stmt
= dyn_cast
<gcall
*> (stmt_info
->stmt
);
3141 if (gimple_call_internal_p (stmt
)
3142 && (internal_load_fn_p (gimple_call_internal_fn (stmt
))
3143 || internal_store_fn_p (gimple_call_internal_fn (stmt
))))
3144 /* Handled by vectorizable_load and vectorizable_store. */
3147 if (gimple_call_lhs (stmt
) == NULL_TREE
3148 || TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3151 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3153 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
3155 /* Process function arguments. */
3156 rhs_type
= NULL_TREE
;
3157 vectype_in
= NULL_TREE
;
3158 nargs
= gimple_call_num_args (stmt
);
3160 /* Bail out if the function has more than three arguments, we do not have
3161 interesting builtin functions to vectorize with more than two arguments
3162 except for fma. No arguments is also not good. */
3163 if (nargs
== 0 || nargs
> 4)
3166 /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic. */
3167 combined_fn cfn
= gimple_call_combined_fn (stmt
);
3168 if (cfn
== CFN_GOMP_SIMD_LANE
)
3171 rhs_type
= unsigned_type_node
;
3175 if (internal_fn_p (cfn
))
3176 mask_opno
= internal_fn_mask_index (as_internal_fn (cfn
));
3178 for (i
= 0; i
< nargs
; i
++)
3182 op
= gimple_call_arg (stmt
, i
);
3183 if (!vect_is_simple_use (op
, vinfo
, &dt
[i
], &opvectype
))
3185 if (dump_enabled_p ())
3186 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3187 "use not simple.\n");
3191 /* Skip the mask argument to an internal function. This operand
3192 has been converted via a pattern if necessary. */
3193 if ((int) i
== mask_opno
)
3196 /* We can only handle calls with arguments of the same type. */
3198 && !types_compatible_p (rhs_type
, TREE_TYPE (op
)))
3200 if (dump_enabled_p ())
3201 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3202 "argument types differ.\n");
3206 rhs_type
= TREE_TYPE (op
);
3209 vectype_in
= opvectype
;
3211 && opvectype
!= vectype_in
)
3213 if (dump_enabled_p ())
3214 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3215 "argument vector types differ.\n");
3219 /* If all arguments are external or constant defs use a vector type with
3220 the same size as the output vector type. */
3222 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
3224 gcc_assert (vectype_in
);
3227 if (dump_enabled_p ())
3229 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3230 "no vectype for scalar type ");
3231 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3232 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3239 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
3240 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
3241 if (known_eq (nunits_in
* 2, nunits_out
))
3243 else if (known_eq (nunits_out
, nunits_in
))
3245 else if (known_eq (nunits_out
* 2, nunits_in
))
3250 /* We only handle functions that do not read or clobber memory. */
3251 if (gimple_vuse (stmt
))
3253 if (dump_enabled_p ())
3254 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3255 "function reads from or writes to memory.\n");
3259 /* For now, we only vectorize functions if a target specific builtin
3260 is available. TODO -- in some cases, it might be profitable to
3261 insert the calls for pieces of the vector, in order to be able
3262 to vectorize other operations in the loop. */
3264 internal_fn ifn
= IFN_LAST
;
3265 tree callee
= gimple_call_fndecl (stmt
);
3267 /* First try using an internal function. */
3268 tree_code convert_code
= ERROR_MARK
;
3270 && (modifier
== NONE
3271 || (modifier
== NARROW
3272 && simple_integer_narrowing (vectype_out
, vectype_in
,
3274 ifn
= vectorizable_internal_function (cfn
, callee
, vectype_out
,
3277 /* If that fails, try asking for a target-specific built-in function. */
3278 if (ifn
== IFN_LAST
)
3280 if (cfn
!= CFN_LAST
)
3281 fndecl
= targetm
.vectorize
.builtin_vectorized_function
3282 (cfn
, vectype_out
, vectype_in
);
3284 fndecl
= targetm
.vectorize
.builtin_md_vectorized_function
3285 (callee
, vectype_out
, vectype_in
);
3288 if (ifn
== IFN_LAST
&& !fndecl
)
3290 if (cfn
== CFN_GOMP_SIMD_LANE
3293 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
3294 && TREE_CODE (gimple_call_arg (stmt
, 0)) == SSA_NAME
3295 && LOOP_VINFO_LOOP (loop_vinfo
)->simduid
3296 == SSA_NAME_VAR (gimple_call_arg (stmt
, 0)))
3298 /* We can handle IFN_GOMP_SIMD_LANE by returning a
3299 { 0, 1, 2, ... vf - 1 } vector. */
3300 gcc_assert (nargs
== 0);
3302 else if (modifier
== NONE
3303 && (gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP16
)
3304 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP32
)
3305 || gimple_call_builtin_p (stmt
, BUILT_IN_BSWAP64
)))
3306 return vectorizable_bswap (stmt_info
, gsi
, vec_stmt
, slp_node
,
3307 vectype_in
, cost_vec
);
3310 if (dump_enabled_p ())
3311 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3312 "function is not vectorizable.\n");
3319 else if (modifier
== NARROW
&& ifn
== IFN_LAST
)
3320 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_out
);
3322 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
3324 /* Sanity check: make sure that at least one copy of the vectorized stmt
3325 needs to be generated. */
3326 gcc_assert (ncopies
>= 1);
3328 vec_loop_masks
*masks
= (loop_vinfo
? &LOOP_VINFO_MASKS (loop_vinfo
) : NULL
);
3329 if (!vec_stmt
) /* transformation not required. */
3331 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
3332 DUMP_VECT_SCOPE ("vectorizable_call");
3333 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, slp_node
, cost_vec
);
3334 if (ifn
!= IFN_LAST
&& modifier
== NARROW
&& !slp_node
)
3335 record_stmt_cost (cost_vec
, ncopies
/ 2,
3336 vec_promote_demote
, stmt_info
, 0, vect_body
);
3338 if (loop_vinfo
&& mask_opno
>= 0)
3340 unsigned int nvectors
= (slp_node
3341 ? SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node
)
3343 vect_record_loop_mask (loop_vinfo
, masks
, nvectors
, vectype_out
);
3350 if (dump_enabled_p ())
3351 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
3354 scalar_dest
= gimple_call_lhs (stmt
);
3355 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
3357 bool masked_loop_p
= loop_vinfo
&& LOOP_VINFO_FULLY_MASKED_P (loop_vinfo
);
3359 stmt_vec_info new_stmt_info
= NULL
;
3360 prev_stmt_info
= NULL
;
3361 if (modifier
== NONE
|| ifn
!= IFN_LAST
)
3363 tree prev_res
= NULL_TREE
;
3364 vargs
.safe_grow (nargs
);
3365 orig_vargs
.safe_grow (nargs
);
3366 for (j
= 0; j
< ncopies
; ++j
)
3368 /* Build argument list for the vectorized call. */
3371 auto_vec
<vec
<tree
> > vec_defs (nargs
);
3372 vec
<tree
> vec_oprnds0
;
3374 for (i
= 0; i
< nargs
; i
++)
3375 vargs
[i
] = gimple_call_arg (stmt
, i
);
3376 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3377 vec_oprnds0
= vec_defs
[0];
3379 /* Arguments are ready. Create the new vector stmt. */
3380 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_oprnd0
)
3383 for (k
= 0; k
< nargs
; k
++)
3385 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3386 vargs
[k
] = vec_oprndsk
[i
];
3388 if (modifier
== NARROW
)
3390 /* We don't define any narrowing conditional functions
3392 gcc_assert (mask_opno
< 0);
3393 tree half_res
= make_ssa_name (vectype_in
);
3395 = gimple_build_call_internal_vec (ifn
, vargs
);
3396 gimple_call_set_lhs (call
, half_res
);
3397 gimple_call_set_nothrow (call
, true);
3399 = vect_finish_stmt_generation (stmt_info
, call
, gsi
);
3402 prev_res
= half_res
;
3405 new_temp
= make_ssa_name (vec_dest
);
3407 = gimple_build_assign (new_temp
, convert_code
,
3408 prev_res
, half_res
);
3410 = vect_finish_stmt_generation (stmt_info
, new_stmt
,
3415 if (mask_opno
>= 0 && masked_loop_p
)
3417 unsigned int vec_num
= vec_oprnds0
.length ();
3418 /* Always true for SLP. */
3419 gcc_assert (ncopies
== 1);
3420 tree mask
= vect_get_loop_mask (gsi
, masks
, vec_num
,
3422 vargs
[mask_opno
] = prepare_load_store_mask
3423 (TREE_TYPE (mask
), mask
, vargs
[mask_opno
], gsi
);
3427 if (ifn
!= IFN_LAST
)
3428 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3430 call
= gimple_build_call_vec (fndecl
, vargs
);
3431 new_temp
= make_ssa_name (vec_dest
, call
);
3432 gimple_call_set_lhs (call
, new_temp
);
3433 gimple_call_set_nothrow (call
, true);
3435 = vect_finish_stmt_generation (stmt_info
, call
, gsi
);
3437 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
3440 for (i
= 0; i
< nargs
; i
++)
3442 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3443 vec_oprndsi
.release ();
3448 for (i
= 0; i
< nargs
; i
++)
3450 op
= gimple_call_arg (stmt
, i
);
3453 = vect_get_vec_def_for_operand (op
, stmt_info
);
3456 = vect_get_vec_def_for_stmt_copy (vinfo
, orig_vargs
[i
]);
3458 orig_vargs
[i
] = vargs
[i
] = vec_oprnd0
;
3461 if (mask_opno
>= 0 && masked_loop_p
)
3463 tree mask
= vect_get_loop_mask (gsi
, masks
, ncopies
,
3466 = prepare_load_store_mask (TREE_TYPE (mask
), mask
,
3467 vargs
[mask_opno
], gsi
);
3470 if (cfn
== CFN_GOMP_SIMD_LANE
)
3472 tree cst
= build_index_vector (vectype_out
, j
* nunits_out
, 1);
3474 = vect_get_new_ssa_name (vectype_out
, vect_simple_var
, "cst_");
3475 gimple
*init_stmt
= gimple_build_assign (new_var
, cst
);
3476 vect_init_vector_1 (stmt_info
, init_stmt
, NULL
);
3477 new_temp
= make_ssa_name (vec_dest
);
3478 gimple
*new_stmt
= gimple_build_assign (new_temp
, new_var
);
3480 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
3482 else if (modifier
== NARROW
)
3484 /* We don't define any narrowing conditional functions at
3486 gcc_assert (mask_opno
< 0);
3487 tree half_res
= make_ssa_name (vectype_in
);
3488 gcall
*call
= gimple_build_call_internal_vec (ifn
, vargs
);
3489 gimple_call_set_lhs (call
, half_res
);
3490 gimple_call_set_nothrow (call
, true);
3492 = vect_finish_stmt_generation (stmt_info
, call
, gsi
);
3495 prev_res
= half_res
;
3498 new_temp
= make_ssa_name (vec_dest
);
3499 gassign
*new_stmt
= gimple_build_assign (new_temp
, convert_code
,
3500 prev_res
, half_res
);
3502 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
3507 if (ifn
!= IFN_LAST
)
3508 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3510 call
= gimple_build_call_vec (fndecl
, vargs
);
3511 new_temp
= make_ssa_name (vec_dest
, call
);
3512 gimple_call_set_lhs (call
, new_temp
);
3513 gimple_call_set_nothrow (call
, true);
3515 = vect_finish_stmt_generation (stmt_info
, call
, gsi
);
3518 if (j
== (modifier
== NARROW
? 1 : 0))
3519 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
3521 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
3523 prev_stmt_info
= new_stmt_info
;
3526 else if (modifier
== NARROW
)
3528 /* We don't define any narrowing conditional functions at present. */
3529 gcc_assert (mask_opno
< 0);
3530 for (j
= 0; j
< ncopies
; ++j
)
3532 /* Build argument list for the vectorized call. */
3534 vargs
.create (nargs
* 2);
3540 auto_vec
<vec
<tree
> > vec_defs (nargs
);
3541 vec
<tree
> vec_oprnds0
;
3543 for (i
= 0; i
< nargs
; i
++)
3544 vargs
.quick_push (gimple_call_arg (stmt
, i
));
3545 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
);
3546 vec_oprnds0
= vec_defs
[0];
3548 /* Arguments are ready. Create the new vector stmt. */
3549 for (i
= 0; vec_oprnds0
.iterate (i
, &vec_oprnd0
); i
+= 2)
3553 for (k
= 0; k
< nargs
; k
++)
3555 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
3556 vargs
.quick_push (vec_oprndsk
[i
]);
3557 vargs
.quick_push (vec_oprndsk
[i
+ 1]);
3560 if (ifn
!= IFN_LAST
)
3561 call
= gimple_build_call_internal_vec (ifn
, vargs
);
3563 call
= gimple_build_call_vec (fndecl
, vargs
);
3564 new_temp
= make_ssa_name (vec_dest
, call
);
3565 gimple_call_set_lhs (call
, new_temp
);
3566 gimple_call_set_nothrow (call
, true);
3568 = vect_finish_stmt_generation (stmt_info
, call
, gsi
);
3569 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
3572 for (i
= 0; i
< nargs
; i
++)
3574 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
3575 vec_oprndsi
.release ();
3580 for (i
= 0; i
< nargs
; i
++)
3582 op
= gimple_call_arg (stmt
, i
);
3586 = vect_get_vec_def_for_operand (op
, stmt_info
);
3588 = vect_get_vec_def_for_stmt_copy (vinfo
, vec_oprnd0
);
3592 vec_oprnd1
= gimple_call_arg (new_stmt_info
->stmt
,
3595 = vect_get_vec_def_for_stmt_copy (vinfo
, vec_oprnd1
);
3597 = vect_get_vec_def_for_stmt_copy (vinfo
, vec_oprnd0
);
3600 vargs
.quick_push (vec_oprnd0
);
3601 vargs
.quick_push (vec_oprnd1
);
3604 gcall
*new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
3605 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3606 gimple_call_set_lhs (new_stmt
, new_temp
);
3608 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
3611 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt_info
;
3613 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
3615 prev_stmt_info
= new_stmt_info
;
3618 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
3621 /* No current target implements this case. */
3626 /* The call in STMT might prevent it from being removed in dce.
3627 We however cannot remove it here, due to the way the ssa name
3628 it defines is mapped to the new definition. So just replace
3629 rhs of the statement with something harmless. */
3634 stmt_info
= vect_orig_stmt (stmt_info
);
3635 lhs
= gimple_get_lhs (stmt_info
->stmt
);
3638 = gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
3639 vinfo
->replace_stmt (gsi
, stmt_info
, new_stmt
);
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  HOST_WIDE_INT linear_step;
  enum vect_def_type dt;
  unsigned int align;
  bool simd_lane_linear;
};
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
3663 gimple
*def_stmt
= SSA_NAME_DEF_STMT (op
);
3665 if (!is_gimple_assign (def_stmt
)
3666 || gimple_assign_rhs_code (def_stmt
) != POINTER_PLUS_EXPR
3667 || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt
)))
3670 tree base
= gimple_assign_rhs1 (def_stmt
);
3671 HOST_WIDE_INT linear_step
= 0;
3672 tree v
= gimple_assign_rhs2 (def_stmt
);
3673 while (TREE_CODE (v
) == SSA_NAME
)
3676 def_stmt
= SSA_NAME_DEF_STMT (v
);
3677 if (is_gimple_assign (def_stmt
))
3678 switch (gimple_assign_rhs_code (def_stmt
))
3681 t
= gimple_assign_rhs2 (def_stmt
);
3682 if (linear_step
|| TREE_CODE (t
) != INTEGER_CST
)
3684 base
= fold_build2 (POINTER_PLUS_EXPR
, TREE_TYPE (base
), base
, t
);
3685 v
= gimple_assign_rhs1 (def_stmt
);
3688 t
= gimple_assign_rhs2 (def_stmt
);
3689 if (linear_step
|| !tree_fits_shwi_p (t
) || integer_zerop (t
))
3691 linear_step
= tree_to_shwi (t
);
3692 v
= gimple_assign_rhs1 (def_stmt
);
3695 t
= gimple_assign_rhs1 (def_stmt
);
3696 if (TREE_CODE (TREE_TYPE (t
)) != INTEGER_TYPE
3697 || (TYPE_PRECISION (TREE_TYPE (v
))
3698 < TYPE_PRECISION (TREE_TYPE (t
))))
3707 else if (gimple_call_internal_p (def_stmt
, IFN_GOMP_SIMD_LANE
)
3709 && TREE_CODE (gimple_call_arg (def_stmt
, 0)) == SSA_NAME
3710 && (SSA_NAME_VAR (gimple_call_arg (def_stmt
, 0))
3715 arginfo
->linear_step
= linear_step
;
3717 arginfo
->simd_lane_linear
= true;
/* Return the number of elements in vector type VECTYPE, which is associated
   with a SIMD clone.  At present these vectors always have a constant
   number of elements.  */

static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Function vectorizable_simd_clone_call.

   Check if STMT_INFO performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_simd_clone_call (stmt_vec_info stmt_info,
			      gimple_stmt_iterator *gsi,
			      stmt_vec_info *vec_stmt, slp_tree slp_node,
			      stmt_vector_for_cost *)
{
3750 tree vec_oprnd0
= NULL_TREE
;
3751 stmt_vec_info prev_stmt_info
;
3753 unsigned int nunits
;
3754 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3755 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3756 vec_info
*vinfo
= stmt_info
->vinfo
;
3757 struct loop
*loop
= loop_vinfo
? LOOP_VINFO_LOOP (loop_vinfo
) : NULL
;
3758 tree fndecl
, new_temp
;
3760 auto_vec
<simd_call_arg_info
> arginfo
;
3761 vec
<tree
> vargs
= vNULL
;
3763 tree lhs
, rtype
, ratype
;
3764 vec
<constructor_elt
, va_gc
> *ret_ctor_elts
= NULL
;
3766 /* Is STMT a vectorizable call? */
3767 gcall
*stmt
= dyn_cast
<gcall
*> (stmt_info
->stmt
);
3771 fndecl
= gimple_call_fndecl (stmt
);
3772 if (fndecl
== NULL_TREE
)
3775 struct cgraph_node
*node
= cgraph_node::get (fndecl
);
3776 if (node
== NULL
|| node
->simd_clones
== NULL
)
3779 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3782 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3786 if (gimple_call_lhs (stmt
)
3787 && TREE_CODE (gimple_call_lhs (stmt
)) != SSA_NAME
)
3790 gcc_checking_assert (!stmt_can_throw_internal (stmt
));
3792 vectype
= STMT_VINFO_VECTYPE (stmt_info
);
3794 if (loop_vinfo
&& nested_in_vect_loop_p (loop
, stmt_info
))
  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.reserve (nargs, true);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);
      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned HOST_WIDE_INT vf;
  if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not considering SIMD clones; not yet supported"
			 " for variable-width vectors.\n");
      return false;
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen > vf
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen < vf)
	  this_badness += (exact_log2 (vf)
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }
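
  /* Worked example (hypothetical numbers, for illustration only): with
     vf == 8, a clone of simdlen 4 starts at (log2 (8) - log2 (4)) * 1024
     == 1024 badness, an inbranch clone adds 2048, and a vector argument
     that is actually loop-invariant adds 64; the clone with the lowest
     total badness is selected.  */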
  if (bestn == NULL)
    return false;

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								      i)));
	if (arginfo[i].vectype == NULL
	    || (simd_clone_subparts (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  return false;
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = vf / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    return false;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
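
  /* Illustration (hypothetical numbers): vf == 8 and a chosen clone with
     simdlen == 4 give ncopies == 8 / 4 == 2, i.e. two calls to the clone
     replace eight scalar iterations of the original call.  */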
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if ((bestn->simdclone->args[i].arg_type
	     == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	    || (bestn->simdclone->args[i].arg_type
		== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP))
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      DUMP_VECT_SCOPE ("vectorizable_simd_clone_call");
      /* vect_model_simple_cost (stmt_info, ncopies, dt, slp_node, cost_vec); */
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
4070 for (j
= 0; j
< ncopies
; ++j
)
4072 /* Build argument list for the vectorized call. */
4074 vargs
.create (nargs
);
4078 for (i
= 0; i
< nargs
; i
++)
4080 unsigned int k
, l
, m
, o
;
4082 op
= gimple_call_arg (stmt
, i
);
4083 switch (bestn
->simdclone
->args
[i
].arg_type
)
4085 case SIMD_CLONE_ARG_TYPE_VECTOR
:
4086 atype
= bestn
->simdclone
->args
[i
].vector_type
;
4087 o
= nunits
/ simd_clone_subparts (atype
);
4088 for (m
= j
* o
; m
< (j
+ 1) * o
; m
++)
4090 if (simd_clone_subparts (atype
)
4091 < simd_clone_subparts (arginfo
[i
].vectype
))
4093 poly_uint64 prec
= GET_MODE_BITSIZE (TYPE_MODE (atype
));
4094 k
= (simd_clone_subparts (arginfo
[i
].vectype
)
4095 / simd_clone_subparts (atype
));
4096 gcc_assert ((k
& (k
- 1)) == 0);
4099 = vect_get_vec_def_for_operand (op
, stmt_info
);
4102 vec_oprnd0
= arginfo
[i
].op
;
4103 if ((m
& (k
- 1)) == 0)
4105 = vect_get_vec_def_for_stmt_copy (vinfo
,
4108 arginfo
[i
].op
= vec_oprnd0
;
4110 = build3 (BIT_FIELD_REF
, atype
, vec_oprnd0
,
4112 bitsize_int ((m
& (k
- 1)) * prec
));
4114 = gimple_build_assign (make_ssa_name (atype
),
4116 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
4117 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
4121 k
= (simd_clone_subparts (atype
)
4122 / simd_clone_subparts (arginfo
[i
].vectype
));
4123 gcc_assert ((k
& (k
- 1)) == 0);
4124 vec
<constructor_elt
, va_gc
> *ctor_elts
;
4126 vec_alloc (ctor_elts
, k
);
4129 for (l
= 0; l
< k
; l
++)
4131 if (m
== 0 && l
== 0)
4133 = vect_get_vec_def_for_operand (op
, stmt_info
);
4136 = vect_get_vec_def_for_stmt_copy (vinfo
,
4138 arginfo
[i
].op
= vec_oprnd0
;
4141 CONSTRUCTOR_APPEND_ELT (ctor_elts
, NULL_TREE
,
4145 vargs
.safe_push (vec_oprnd0
);
4148 vec_oprnd0
= build_constructor (atype
, ctor_elts
);
4150 = gimple_build_assign (make_ssa_name (atype
),
4152 vect_finish_stmt_generation (stmt_info
, new_stmt
,
4154 vargs
.safe_push (gimple_assign_lhs (new_stmt
));
4159 case SIMD_CLONE_ARG_TYPE_UNIFORM
:
4160 vargs
.safe_push (op
);
4162 case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP
:
4163 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP
:
4168 = force_gimple_operand (arginfo
[i
].op
, &stmts
, true,
4173 edge pe
= loop_preheader_edge (loop
);
4174 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, stmts
);
4175 gcc_assert (!new_bb
);
4177 if (arginfo
[i
].simd_lane_linear
)
4179 vargs
.safe_push (arginfo
[i
].op
);
4182 tree phi_res
= copy_ssa_name (op
);
4183 gphi
*new_phi
= create_phi_node (phi_res
, loop
->header
);
4184 loop_vinfo
->add_stmt (new_phi
);
4185 add_phi_arg (new_phi
, arginfo
[i
].op
,
4186 loop_preheader_edge (loop
), UNKNOWN_LOCATION
);
4188 = POINTER_TYPE_P (TREE_TYPE (op
))
4189 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
4190 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
4191 ? sizetype
: TREE_TYPE (op
);
4193 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
4195 tree tcst
= wide_int_to_tree (type
, cst
);
4196 tree phi_arg
= copy_ssa_name (op
);
4198 = gimple_build_assign (phi_arg
, code
, phi_res
, tcst
);
4199 gimple_stmt_iterator si
= gsi_after_labels (loop
->header
);
4200 gsi_insert_after (&si
, new_stmt
, GSI_NEW_STMT
);
4201 loop_vinfo
->add_stmt (new_stmt
);
4202 add_phi_arg (new_phi
, phi_arg
, loop_latch_edge (loop
),
4204 arginfo
[i
].op
= phi_res
;
4205 vargs
.safe_push (phi_res
);
4210 = POINTER_TYPE_P (TREE_TYPE (op
))
4211 ? POINTER_PLUS_EXPR
: PLUS_EXPR
;
4212 tree type
= POINTER_TYPE_P (TREE_TYPE (op
))
4213 ? sizetype
: TREE_TYPE (op
);
4215 = wi::mul (bestn
->simdclone
->args
[i
].linear_step
,
4217 tree tcst
= wide_int_to_tree (type
, cst
);
4218 new_temp
= make_ssa_name (TREE_TYPE (op
));
4220 = gimple_build_assign (new_temp
, code
,
4221 arginfo
[i
].op
, tcst
);
4222 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
4223 vargs
.safe_push (new_temp
);
4226 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP
:
4227 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
:
4228 case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP
:
4229 case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP
:
4230 case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP
:
4231 case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP
:
4237 gcall
*new_call
= gimple_build_call_vec (fndecl
, vargs
);
4240 gcc_assert (ratype
|| simd_clone_subparts (rtype
) == nunits
);
4242 new_temp
= create_tmp_var (ratype
);
4243 else if (simd_clone_subparts (vectype
)
4244 == simd_clone_subparts (rtype
))
4245 new_temp
= make_ssa_name (vec_dest
, new_call
);
4247 new_temp
= make_ssa_name (rtype
, new_call
);
4248 gimple_call_set_lhs (new_call
, new_temp
);
4250 stmt_vec_info new_stmt_info
4251 = vect_finish_stmt_generation (stmt_info
, new_call
, gsi
);
4255 if (simd_clone_subparts (vectype
) < nunits
)
4258 poly_uint64 prec
= GET_MODE_BITSIZE (TYPE_MODE (vectype
));
4259 poly_uint64 bytes
= GET_MODE_SIZE (TYPE_MODE (vectype
));
4260 k
= nunits
/ simd_clone_subparts (vectype
);
4261 gcc_assert ((k
& (k
- 1)) == 0);
4262 for (l
= 0; l
< k
; l
++)
4267 t
= build_fold_addr_expr (new_temp
);
4268 t
= build2 (MEM_REF
, vectype
, t
,
4269 build_int_cst (TREE_TYPE (t
), l
* bytes
));
4272 t
= build3 (BIT_FIELD_REF
, vectype
, new_temp
,
4273 bitsize_int (prec
), bitsize_int (l
* prec
));
4275 = gimple_build_assign (make_ssa_name (vectype
), t
);
4277 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
4279 if (j
== 0 && l
== 0)
4280 STMT_VINFO_VEC_STMT (stmt_info
)
4281 = *vec_stmt
= new_stmt_info
;
4283 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
4285 prev_stmt_info
= new_stmt_info
;
4289 vect_clobber_variable (stmt_info
, gsi
, new_temp
);
4292 else if (simd_clone_subparts (vectype
) > nunits
)
4294 unsigned int k
= (simd_clone_subparts (vectype
)
4295 / simd_clone_subparts (rtype
));
4296 gcc_assert ((k
& (k
- 1)) == 0);
4297 if ((j
& (k
- 1)) == 0)
4298 vec_alloc (ret_ctor_elts
, k
);
4301 unsigned int m
, o
= nunits
/ simd_clone_subparts (rtype
);
4302 for (m
= 0; m
< o
; m
++)
4304 tree tem
= build4 (ARRAY_REF
, rtype
, new_temp
,
4305 size_int (m
), NULL_TREE
, NULL_TREE
);
4307 = gimple_build_assign (make_ssa_name (rtype
), tem
);
4309 = vect_finish_stmt_generation (stmt_info
, new_stmt
,
4311 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
,
4312 gimple_assign_lhs (new_stmt
));
4314 vect_clobber_variable (stmt_info
, gsi
, new_temp
);
4317 CONSTRUCTOR_APPEND_ELT (ret_ctor_elts
, NULL_TREE
, new_temp
);
4318 if ((j
& (k
- 1)) != k
- 1)
4320 vec_oprnd0
= build_constructor (vectype
, ret_ctor_elts
);
4322 = gimple_build_assign (make_ssa_name (vec_dest
), vec_oprnd0
);
4324 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
4326 if ((unsigned) j
== k
- 1)
4327 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
4329 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
4331 prev_stmt_info
= new_stmt_info
;
4336 tree t
= build_fold_addr_expr (new_temp
);
4337 t
= build2 (MEM_REF
, vectype
, t
,
4338 build_int_cst (TREE_TYPE (t
), 0));
4340 = gimple_build_assign (make_ssa_name (vec_dest
), t
);
4342 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
4343 vect_clobber_variable (stmt_info
, gsi
, new_temp
);
4348 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
4350 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
4352 prev_stmt_info
= new_stmt_info
;
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  gimple *new_stmt;
  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      lhs = gimple_call_lhs (vect_orig_stmt (stmt_info)->stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  vinfo->replace_stmt (gsi, vect_orig_stmt (stmt_info), new_stmt);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT_INFO is the original scalar stmt that we are vectorizing.  */
static gimple *
vect_gen_widened_results_half (enum tree_code code, tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       stmt_vec_info stmt_info)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization of STMT_INFO.
   For the first operand we call vect_get_vec_def_for_operand (with OPRND
   containing scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy () using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy () for details.
   The vectors are collected into VEC_OPRNDS.  */
static void
vect_get_loop_based_defs (tree *oprnd, stmt_vec_info stmt_info,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  vec_info *vinfo = stmt_info->vinfo;
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt_info);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt_info, vec_oprnds,
			      multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt,
				       stmt_vec_info stmt_info,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      stmt_vec_info new_stmt_info
	= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt_info;

	      *prev_stmt_info = new_stmt_info;
	    }
	}
    }
  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt_info, vec_dsts, gsi,
					     slp_node, VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
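
/* Illustration (hypothetical shapes): demoting eight V4SI operands to V8HI
   pairs them two-by-two into four results; one additional recursion level
   (multi_step_cvt) would pack those again into two vectors.  */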
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1, for a binary operation associated with scalar statement
   STMT_INFO.  For multi-step conversions store the resulting vectors and
   call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					stmt_vec_info stmt_info, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi,
						 stmt_info);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi,
						 stmt_info);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
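
/* Illustration (hypothetical shapes): widening four V8HI operands with a
   lo/hi pair of operations such as VEC_UNPACK_LO_EXPR / VEC_UNPACK_HI_EXPR
   yields eight V4SI results, so vec_tmp ends up twice as long as
   vec_oprnds0.  */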
/* Check if STMT_INFO performs a conversion operation that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_conversion (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			 stmt_vec_info *vec_stmt, slp_tree slp_node,
			 stmt_vector_for_cost *cost_vec)
{
4602 tree op0
, op1
= NULL_TREE
;
4603 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
4604 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
4605 enum tree_code code
, code1
= ERROR_MARK
, code2
= ERROR_MARK
;
4606 enum tree_code codecvt1
= ERROR_MARK
, codecvt2
= ERROR_MARK
;
4607 tree decl1
= NULL_TREE
, decl2
= NULL_TREE
;
4609 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
4611 stmt_vec_info prev_stmt_info
;
4612 poly_uint64 nunits_in
;
4613 poly_uint64 nunits_out
;
4614 tree vectype_out
, vectype_in
;
4616 tree lhs_type
, rhs_type
;
4617 enum { NARROW
, NONE
, WIDEN
} modifier
;
4618 vec
<tree
> vec_oprnds0
= vNULL
;
4619 vec
<tree
> vec_oprnds1
= vNULL
;
4621 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
4622 vec_info
*vinfo
= stmt_info
->vinfo
;
4623 int multi_step_cvt
= 0;
4624 vec
<tree
> interm_types
= vNULL
;
4625 tree last_oprnd
, intermediate_type
, cvt_type
= NULL_TREE
;
4627 unsigned short fltsz
;
4629 /* Is STMT a vectorizable conversion? */
4631 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
4634 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
4638 gassign
*stmt
= dyn_cast
<gassign
*> (stmt_info
->stmt
);
4642 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
4645 code
= gimple_assign_rhs_code (stmt
);
4646 if (!CONVERT_EXPR_CODE_P (code
)
4647 && code
!= FIX_TRUNC_EXPR
4648 && code
!= FLOAT_EXPR
4649 && code
!= WIDEN_MULT_EXPR
4650 && code
!= WIDEN_LSHIFT_EXPR
)
4653 op_type
= TREE_CODE_LENGTH (code
);
4655 /* Check types of lhs and rhs. */
4656 scalar_dest
= gimple_assign_lhs (stmt
);
4657 lhs_type
= TREE_TYPE (scalar_dest
);
4658 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
4660 op0
= gimple_assign_rhs1 (stmt
);
4661 rhs_type
= TREE_TYPE (op0
);
4663 if ((code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4664 && !((INTEGRAL_TYPE_P (lhs_type
)
4665 && INTEGRAL_TYPE_P (rhs_type
))
4666 || (SCALAR_FLOAT_TYPE_P (lhs_type
)
4667 && SCALAR_FLOAT_TYPE_P (rhs_type
))))
4670 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4671 && ((INTEGRAL_TYPE_P (lhs_type
)
4672 && !type_has_mode_precision_p (lhs_type
))
4673 || (INTEGRAL_TYPE_P (rhs_type
)
4674 && !type_has_mode_precision_p (rhs_type
))))
4676 if (dump_enabled_p ())
4677 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4678 "type conversion to/from bit-precision unsupported."
4683 /* Check the operands of the operation. */
4684 if (!vect_is_simple_use (op0
, vinfo
, &dt
[0], &vectype_in
))
4686 if (dump_enabled_p ())
4687 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4688 "use not simple.\n");
4691 if (op_type
== binary_op
)
4695 op1
= gimple_assign_rhs2 (stmt
);
4696 gcc_assert (code
== WIDEN_MULT_EXPR
|| code
== WIDEN_LSHIFT_EXPR
);
4697 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
4699 if (CONSTANT_CLASS_P (op0
))
4700 ok
= vect_is_simple_use (op1
, vinfo
, &dt
[1], &vectype_in
);
4702 ok
= vect_is_simple_use (op1
, vinfo
, &dt
[1]);
4706 if (dump_enabled_p ())
4707 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4708 "use not simple.\n");
4713 /* If op0 is an external or constant defs use a vector type of
4714 the same size as the output vector type. */
4716 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
4718 gcc_assert (vectype_in
);
4721 if (dump_enabled_p ())
4723 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4724 "no vectype for scalar type ");
4725 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4726 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4732 if (VECTOR_BOOLEAN_TYPE_P (vectype_out
)
4733 && !VECTOR_BOOLEAN_TYPE_P (vectype_in
))
4735 if (dump_enabled_p ())
4737 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4738 "can't convert between boolean and non "
4740 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
4741 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
4747 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
4748 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
4749 if (known_eq (nunits_out
, nunits_in
))
4751 else if (multiple_p (nunits_out
, nunits_in
))
4755 gcc_checking_assert (multiple_p (nunits_in
, nunits_out
));
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
4764 else if (modifier
== NARROW
)
4765 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_out
);
4767 ncopies
= vect_get_num_copies (loop_vinfo
, vectype_in
);
4769 /* Sanity check: make sure that at least one copy of the vectorized stmt
4770 needs to be generated. */
4771 gcc_assert (ncopies
>= 1);
4773 bool found_mode
= false;
4774 scalar_mode lhs_mode
= SCALAR_TYPE_MODE (lhs_type
);
4775 scalar_mode rhs_mode
= SCALAR_TYPE_MODE (rhs_type
);
4776 opt_scalar_mode rhs_mode_iter
;
4778 /* Supportable by target? */
4782 if (code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
4784 if (supportable_convert_operation (code
, vectype_out
, vectype_in
,
4789 if (dump_enabled_p ())
4790 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
4791 "conversion not supported by target.\n");
4795 if (supportable_widening_operation (code
, stmt_info
, vectype_out
,
4796 vectype_in
, &code1
, &code2
,
4797 &multi_step_cvt
, &interm_types
))
4799 /* Binary widening operation can only be supported directly by the
4801 gcc_assert (!(multi_step_cvt
&& op_type
== binary_op
));
4805 if (code
!= FLOAT_EXPR
4806 || GET_MODE_SIZE (lhs_mode
) <= GET_MODE_SIZE (rhs_mode
))
4809 fltsz
= GET_MODE_SIZE (lhs_mode
);
4810 FOR_EACH_2XWIDER_MODE (rhs_mode_iter
, rhs_mode
)
4812 rhs_mode
= rhs_mode_iter
.require ();
4813 if (GET_MODE_SIZE (rhs_mode
) > fltsz
)
4817 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4818 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4819 if (cvt_type
== NULL_TREE
)
4822 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4824 if (!supportable_convert_operation (code
, vectype_out
,
4825 cvt_type
, &decl1
, &codecvt1
))
4828 else if (!supportable_widening_operation (code
, stmt_info
,
4829 vectype_out
, cvt_type
,
4830 &codecvt1
, &codecvt2
,
4835 gcc_assert (multi_step_cvt
== 0);
4837 if (supportable_widening_operation (NOP_EXPR
, stmt_info
, cvt_type
,
4838 vectype_in
, &code1
, &code2
,
4839 &multi_step_cvt
, &interm_types
))
4849 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
4850 codecvt2
= ERROR_MARK
;
4854 interm_types
.safe_push (cvt_type
);
4855 cvt_type
= NULL_TREE
;
4860 gcc_assert (op_type
== unary_op
);
4861 if (supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
4862 &code1
, &multi_step_cvt
,
4866 if (code
!= FIX_TRUNC_EXPR
4867 || GET_MODE_SIZE (lhs_mode
) >= GET_MODE_SIZE (rhs_mode
))
4871 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
4872 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
4873 if (cvt_type
== NULL_TREE
)
4875 if (!supportable_convert_operation (code
, cvt_type
, vectype_in
,
4878 if (supportable_narrowing_operation (NOP_EXPR
, vectype_out
, cvt_type
,
4879 &code1
, &multi_step_cvt
,
4888 if (!vec_stmt
) /* transformation not required. */
4890 DUMP_VECT_SCOPE ("vectorizable_conversion");
4891 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
4893 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
4894 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, slp_node
,
4897 else if (modifier
== NARROW
)
4899 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
4900 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
,
4905 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
4906 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
,
4909 interm_types
.release ();
4914 if (dump_enabled_p ())
4915 dump_printf_loc (MSG_NOTE
, vect_location
,
4916 "transform conversion. ncopies = %d.\n", ncopies
);
4918 if (op_type
== binary_op
)
4920 if (CONSTANT_CLASS_P (op0
))
4921 op0
= fold_convert (TREE_TYPE (op1
), op0
);
4922 else if (CONSTANT_CLASS_P (op1
))
4923 op1
= fold_convert (TREE_TYPE (op0
), op1
);
  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
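  /* Illustration (hypothetical types): a short -> double conversion may be
     performed in two steps, short -> int and then int -> double, in which
     case vec_dsts holds a destination for the intermediate int vector type
     in addition to one for vectype_out.  */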
4931 auto_vec
<tree
> vec_dsts (multi_step_cvt
+ 1);
4932 vec_dest
= vect_create_destination_var (scalar_dest
,
4933 (cvt_type
&& modifier
== WIDEN
)
4934 ? cvt_type
: vectype_out
);
4935 vec_dsts
.quick_push (vec_dest
);
4939 for (i
= interm_types
.length () - 1;
4940 interm_types
.iterate (i
, &intermediate_type
); i
--)
4942 vec_dest
= vect_create_destination_var (scalar_dest
,
4944 vec_dsts
.quick_push (vec_dest
);
4949 vec_dest
= vect_create_destination_var (scalar_dest
,
4951 ? vectype_out
: cvt_type
);
4955 if (modifier
== WIDEN
)
4957 vec_oprnds0
.create (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1);
4958 if (op_type
== binary_op
)
4959 vec_oprnds1
.create (1);
4961 else if (modifier
== NARROW
)
4962 vec_oprnds0
.create (
4963 2 * (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
4965 else if (code
== WIDEN_LSHIFT_EXPR
)
4966 vec_oprnds1
.create (slp_node
->vec_stmts_size
);
4969 prev_stmt_info
= NULL
;
4973 for (j
= 0; j
< ncopies
; j
++)
4976 vect_get_vec_defs (op0
, NULL
, stmt_info
, &vec_oprnds0
,
4979 vect_get_vec_defs_for_stmt_copy (vinfo
, &vec_oprnds0
, NULL
);
4981 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4983 stmt_vec_info new_stmt_info
;
4984 /* Arguments are ready, create the new vector stmt. */
4985 if (code1
== CALL_EXPR
)
4987 gcall
*new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4988 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4989 gimple_call_set_lhs (new_stmt
, new_temp
);
4991 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
4995 gcc_assert (TREE_CODE_LENGTH (code1
) == unary_op
);
4997 = gimple_build_assign (vec_dest
, code1
, vop0
);
4998 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4999 gimple_assign_set_lhs (new_stmt
, new_temp
);
5001 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
5005 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
5008 if (!prev_stmt_info
)
5009 STMT_VINFO_VEC_STMT (stmt_info
)
5010 = *vec_stmt
= new_stmt_info
;
5012 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
5013 prev_stmt_info
= new_stmt_info
;
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
5024 for (j
= 0; j
< ncopies
; j
++)
5031 if (code
== WIDEN_LSHIFT_EXPR
)
5036 /* Store vec_oprnd1 for every vector stmt to be created
5037 for SLP_NODE. We check during the analysis that all
5038 the shift arguments are the same. */
5039 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
5040 vec_oprnds1
.quick_push (vec_oprnd1
);
5042 vect_get_vec_defs (op0
, NULL_TREE
, stmt_info
,
5043 &vec_oprnds0
, NULL
, slp_node
);
5046 vect_get_vec_defs (op0
, op1
, stmt_info
, &vec_oprnds0
,
5047 &vec_oprnds1
, slp_node
);
5051 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt_info
);
5052 vec_oprnds0
.quick_push (vec_oprnd0
);
5053 if (op_type
== binary_op
)
5055 if (code
== WIDEN_LSHIFT_EXPR
)
5059 = vect_get_vec_def_for_operand (op1
, stmt_info
);
5060 vec_oprnds1
.quick_push (vec_oprnd1
);
5066 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (vinfo
, vec_oprnd0
);
5067 vec_oprnds0
.truncate (0);
5068 vec_oprnds0
.quick_push (vec_oprnd0
);
5069 if (op_type
== binary_op
)
5071 if (code
== WIDEN_LSHIFT_EXPR
)
5074 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (vinfo
,
5076 vec_oprnds1
.truncate (0);
5077 vec_oprnds1
.quick_push (vec_oprnd1
);
5081 /* Arguments are ready. Create the new vector stmts. */
5082 for (i
= multi_step_cvt
; i
>= 0; i
--)
5084 tree this_dest
= vec_dsts
[i
];
5085 enum tree_code c1
= code1
, c2
= code2
;
5086 if (i
== 0 && codecvt2
!= ERROR_MARK
)
5091 vect_create_vectorized_promotion_stmts (&vec_oprnds0
,
5092 &vec_oprnds1
, stmt_info
,
5094 c1
, c2
, decl1
, decl2
,
5098 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5100 stmt_vec_info new_stmt_info
;
5103 if (codecvt1
== CALL_EXPR
)
5105 gcall
*new_stmt
= gimple_build_call (decl1
, 1, vop0
);
5106 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5107 gimple_call_set_lhs (new_stmt
, new_temp
);
5109 = vect_finish_stmt_generation (stmt_info
, new_stmt
,
5114 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
5115 new_temp
= make_ssa_name (vec_dest
);
5117 = gimple_build_assign (new_temp
, codecvt1
, vop0
);
5119 = vect_finish_stmt_generation (stmt_info
, new_stmt
,
5124 new_stmt_info
= vinfo
->lookup_def (vop0
);
5127 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
5130 if (!prev_stmt_info
)
5131 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt_info
;
5133 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
5134 prev_stmt_info
= new_stmt_info
;
5139 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
5147 for (j
= 0; j
< ncopies
; j
++)
5151 vect_get_vec_defs (op0
, NULL_TREE
, stmt_info
, &vec_oprnds0
, NULL
,
5155 vec_oprnds0
.truncate (0);
5156 vect_get_loop_based_defs (&last_oprnd
, stmt_info
, &vec_oprnds0
,
5157 vect_pow2 (multi_step_cvt
) - 1);
5160 /* Arguments are ready. Create the new vector stmts. */
5162 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5164 if (codecvt1
== CALL_EXPR
)
5166 gcall
*new_stmt
= gimple_build_call (decl1
, 1, vop0
);
5167 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5168 gimple_call_set_lhs (new_stmt
, new_temp
);
5169 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
5173 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
5174 new_temp
= make_ssa_name (vec_dest
);
5176 = gimple_build_assign (new_temp
, codecvt1
, vop0
);
5177 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
5180 vec_oprnds0
[i
] = new_temp
;
5183 vect_create_vectorized_demotion_stmts (&vec_oprnds0
, multi_step_cvt
,
5184 stmt_info
, vec_dsts
, gsi
,
5189 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
5193 vec_oprnds0
.release ();
5194 vec_oprnds1
.release ();
5195 interm_types
.release ();
/* Function vectorizable_assignment.

   Check if STMT_INFO performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_assignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			 stmt_vec_info *vec_stmt, slp_tree slp_node,
			 stmt_vector_for_cost *cost_vec)
{
5216 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5218 enum vect_def_type dt
[1] = {vect_unknown_def_type
};
5222 vec
<tree
> vec_oprnds
= vNULL
;
5224 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5225 vec_info
*vinfo
= stmt_info
->vinfo
;
5226 stmt_vec_info prev_stmt_info
= NULL
;
5227 enum tree_code code
;
5230 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5233 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5237 /* Is vectorizable assignment? */
5238 gassign
*stmt
= dyn_cast
<gassign
*> (stmt_info
->stmt
);
5242 scalar_dest
= gimple_assign_lhs (stmt
);
5243 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
5246 code
= gimple_assign_rhs_code (stmt
);
5247 if (gimple_assign_single_p (stmt
)
5248 || code
== PAREN_EXPR
5249 || CONVERT_EXPR_CODE_P (code
))
5250 op
= gimple_assign_rhs1 (stmt
);
5254 if (code
== VIEW_CONVERT_EXPR
)
5255 op
= TREE_OPERAND (op
, 0);
5257 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
5258 poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
5266 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5268 gcc_assert (ncopies
>= 1);
5270 if (!vect_is_simple_use (op
, vinfo
, &dt
[0], &vectype_in
))
5272 if (dump_enabled_p ())
5273 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5274 "use not simple.\n");
  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
5280 if ((CONVERT_EXPR_CODE_P (code
)
5281 || code
== VIEW_CONVERT_EXPR
)
5283 || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in
), nunits
)
5284 || maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype
)),
5285 GET_MODE_SIZE (TYPE_MODE (vectype_in
)))))
5288 /* We do not handle bit-precision changes. */
5289 if ((CONVERT_EXPR_CODE_P (code
)
5290 || code
== VIEW_CONVERT_EXPR
)
5291 && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest
))
5292 && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
5293 || !type_has_mode_precision_p (TREE_TYPE (op
)))
5294 /* But a conversion that does not change the bit-pattern is ok. */
5295 && !((TYPE_PRECISION (TREE_TYPE (scalar_dest
))
5296 > TYPE_PRECISION (TREE_TYPE (op
)))
5297 && TYPE_UNSIGNED (TREE_TYPE (op
)))
5298 /* Conversion between boolean types of different sizes is
5299 a simple assignment in case their vectypes are same
5301 && (!VECTOR_BOOLEAN_TYPE_P (vectype
)
5302 || !VECTOR_BOOLEAN_TYPE_P (vectype_in
)))
5304 if (dump_enabled_p ())
5305 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5306 "type conversion to/from bit-precision "
5311 if (!vec_stmt
) /* transformation not required. */
5313 STMT_VINFO_TYPE (stmt_info
) = assignment_vec_info_type
;
5314 DUMP_VECT_SCOPE ("vectorizable_assignment");
5315 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, slp_node
, cost_vec
);
5320 if (dump_enabled_p ())
5321 dump_printf_loc (MSG_NOTE
, vect_location
, "transform assignment.\n");
5324 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5327 for (j
= 0; j
< ncopies
; j
++)
5331 vect_get_vec_defs (op
, NULL
, stmt_info
, &vec_oprnds
, NULL
, slp_node
);
5333 vect_get_vec_defs_for_stmt_copy (vinfo
, &vec_oprnds
, NULL
);
5335 /* Arguments are ready. create the new vector stmt. */
5336 stmt_vec_info new_stmt_info
= NULL
;
5337 FOR_EACH_VEC_ELT (vec_oprnds
, i
, vop
)
5339 if (CONVERT_EXPR_CODE_P (code
)
5340 || code
== VIEW_CONVERT_EXPR
)
5341 vop
= build1 (VIEW_CONVERT_EXPR
, vectype
, vop
);
5342 gassign
*new_stmt
= gimple_build_assign (vec_dest
, vop
);
5343 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5344 gimple_assign_set_lhs (new_stmt
, new_temp
);
5346 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
5348 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
5355 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
5357 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
5359 prev_stmt_info
= new_stmt_info
;
5362 vec_oprnds
.release ();
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
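
/* Illustration (hypothetical query): vect_supportable_shift (LSHIFT_EXPR,
   short_integer_type_node) answers whether the target provides either a
   vector-by-scalar or a vector-by-vector left-shift pattern for the
   corresponding vector mode.  */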
/* Function vectorizable_shift.

   Check if STMT_INFO performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_shift (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		    stmt_vec_info *vec_stmt, slp_tree slp_node,
		    stmt_vector_for_cost *cost_vec)
{
5417 tree op0
, op1
= NULL
;
5418 tree vec_oprnd1
= NULL_TREE
;
5420 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5421 enum tree_code code
;
5422 machine_mode vec_mode
;
5426 machine_mode optab_op2_mode
;
5427 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
5429 stmt_vec_info prev_stmt_info
;
5430 poly_uint64 nunits_in
;
5431 poly_uint64 nunits_out
;
5436 vec
<tree
> vec_oprnds0
= vNULL
;
5437 vec
<tree
> vec_oprnds1
= vNULL
;
5440 bool scalar_shift_arg
= true;
5441 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5442 vec_info
*vinfo
= stmt_info
->vinfo
;
5444 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5447 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5451 /* Is STMT a vectorizable binary/unary operation? */
5452 gassign
*stmt
= dyn_cast
<gassign
*> (stmt_info
->stmt
);
5456 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
5459 code
= gimple_assign_rhs_code (stmt
);
5461 if (!(code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
5462 || code
== RROTATE_EXPR
))
5465 scalar_dest
= gimple_assign_lhs (stmt
);
5466 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
5467 if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest
)))
5469 if (dump_enabled_p ())
5470 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5471 "bit-precision shifts not supported.\n");
5475 op0
= gimple_assign_rhs1 (stmt
);
5476 if (!vect_is_simple_use (op0
, vinfo
, &dt
[0], &vectype
))
5478 if (dump_enabled_p ())
5479 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5480 "use not simple.\n");
5483 /* If op0 is an external or constant def use a vector type with
5484 the same size as the output vector type. */
5486 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
5488 gcc_assert (vectype
);
5491 if (dump_enabled_p ())
5492 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5493 "no vectype for scalar type\n");
5497 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
5498 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
5499 if (maybe_ne (nunits_out
, nunits_in
))
5502 op1
= gimple_assign_rhs2 (stmt
);
5503 stmt_vec_info op1_def_stmt_info
;
5504 if (!vect_is_simple_use (op1
, vinfo
, &dt
[1], &op1_vectype
,
5505 &op1_def_stmt_info
))
5507 if (dump_enabled_p ())
5508 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5509 "use not simple.\n");
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
5519 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5521 gcc_assert (ncopies
>= 1);
  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */
5526 if ((dt
[1] == vect_internal_def
5527 || dt
[1] == vect_induction_def
)
5529 scalar_shift_arg
= false;
5530 else if (dt
[1] == vect_constant_def
5531 || dt
[1] == vect_external_def
5532 || dt
[1] == vect_internal_def
)
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
5539 vec
<stmt_vec_info
> stmts
= SLP_TREE_SCALAR_STMTS (slp_node
);
5540 stmt_vec_info slpstmt_info
;
5542 FOR_EACH_VEC_ELT (stmts
, k
, slpstmt_info
)
5544 gassign
*slpstmt
= as_a
<gassign
*> (slpstmt_info
->stmt
);
5545 if (!operand_equal_p (gimple_assign_rhs2 (slpstmt
), op1
, 0))
5546 scalar_shift_arg
= false;
      /* If the shift amount is computed by a pattern stmt we cannot
	 use the scalar amount directly thus give up and use a vector
	 shift.  */
5553 if (op1_def_stmt_info
&& is_pattern_stmt_p (op1_def_stmt_info
))
5554 scalar_shift_arg
= false;
5558 if (dump_enabled_p ())
5559 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5560 "operand mode requires invariant argument.\n");
5564 /* Vector shifted by vector. */
5565 if (!scalar_shift_arg
)
5567 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5568 if (dump_enabled_p ())
5569 dump_printf_loc (MSG_NOTE
, vect_location
,
5570 "vector/vector shift/rotate found.\n");
5573 op1_vectype
= get_same_sized_vectype (TREE_TYPE (op1
), vectype_out
);
5574 if (op1_vectype
== NULL_TREE
5575 || TYPE_MODE (op1_vectype
) != TYPE_MODE (vectype
))
5577 if (dump_enabled_p ())
5578 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5579 "unusable type for last operand in"
5580 " vector/vector shift/rotate.\n");
      /* See if the machine has a vector shifted by scalar insn and if not
	 then see if it has a vector shifted by vector insn.  */
5588 optab
= optab_for_tree_code (code
, vectype
, optab_scalar
);
5590 && optab_handler (optab
, TYPE_MODE (vectype
)) != CODE_FOR_nothing
)
5592 if (dump_enabled_p ())
5593 dump_printf_loc (MSG_NOTE
, vect_location
,
5594 "vector/scalar shift/rotate found.\n");
5598 optab
= optab_for_tree_code (code
, vectype
, optab_vector
);
5600 && (optab_handler (optab
, TYPE_MODE (vectype
))
5601 != CODE_FOR_nothing
))
5603 scalar_shift_arg
= false;
5605 if (dump_enabled_p ())
5606 dump_printf_loc (MSG_NOTE
, vect_location
,
5607 "vector/vector shift/rotate found.\n");
	  /* Unlike the other binary operators, shifts/rotates have
	     the rhs being int, instead of the same type as the lhs,
	     so make sure the scalar is the right type if we are
	     dealing with vectors of long long/long/short/char.  */
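	  /* Illustration (hypothetical operands): shifting a vector of
	     long long by the int constant 3 folds op1 to a long long 3
	     below, so the vector/vector shift sees matching element
	     modes.  */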
5613 if (dt
[1] == vect_constant_def
)
5614 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5615 else if (!useless_type_conversion_p (TREE_TYPE (vectype
),
5619 && TYPE_MODE (TREE_TYPE (vectype
))
5620 != TYPE_MODE (TREE_TYPE (op1
)))
5622 if (dump_enabled_p ())
5623 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5624 "unusable type for last operand in"
5625 " vector/vector shift/rotate.\n");
5628 if (vec_stmt
&& !slp_node
)
5630 op1
= fold_convert (TREE_TYPE (vectype
), op1
);
5631 op1
= vect_init_vector (stmt_info
, op1
,
5632 TREE_TYPE (vectype
), NULL
);
5639 /* Supportable by target? */
5642 if (dump_enabled_p ())
5643 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5647 vec_mode
= TYPE_MODE (vectype
);
5648 icode
= (int) optab_handler (optab
, vec_mode
);
5649 if (icode
== CODE_FOR_nothing
)
5651 if (dump_enabled_p ())
5652 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5653 "op not supported by target.\n");
5654 /* Check only during analysis. */
5655 if (maybe_ne (GET_MODE_SIZE (vec_mode
), UNITS_PER_WORD
)
5657 && !vect_worthwhile_without_simd_p (vinfo
, code
)))
5659 if (dump_enabled_p ())
5660 dump_printf_loc (MSG_NOTE
, vect_location
,
5661 "proceeding using word mode.\n");
5664 /* Worthwhile without SIMD support? Check only during analysis. */
5666 && !VECTOR_MODE_P (TYPE_MODE (vectype
))
5667 && !vect_worthwhile_without_simd_p (vinfo
, code
))
5669 if (dump_enabled_p ())
5670 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5671 "not worthwhile without SIMD support.\n");
5675 if (!vec_stmt
) /* transformation not required. */
5677 STMT_VINFO_TYPE (stmt_info
) = shift_vec_info_type
;
5678 DUMP_VECT_SCOPE ("vectorizable_shift");
5679 vect_model_simple_cost (stmt_info
, ncopies
, dt
, ndts
, slp_node
, cost_vec
);
5685 if (dump_enabled_p ())
5686 dump_printf_loc (MSG_NOTE
, vect_location
,
5687 "transform binary/unary operation.\n");
5690 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
5692 prev_stmt_info
= NULL
;
5693 for (j
= 0; j
< ncopies
; j
++)
5698 if (scalar_shift_arg
)
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or loop
		 invariant op1 directly, without extending it to vector mode
		 first.  */
5704 optab_op2_mode
= insn_data
[icode
].operand
[2].mode
;
5705 if (!VECTOR_MODE_P (optab_op2_mode
))
5707 if (dump_enabled_p ())
5708 dump_printf_loc (MSG_NOTE
, vect_location
,
5709 "operand 1 using scalar mode.\n");
5711 vec_oprnds1
.create (slp_node
? slp_node
->vec_stmts_size
: 1);
5712 vec_oprnds1
.quick_push (vec_oprnd1
);
	      /* Store vec_oprnd1 for every vector stmt to be created
		 for SLP_NODE.  We check during the analysis that all
		 the shift arguments are the same.
		 TODO: Allow different constants for different vector
		 stmts generated for an SLP instance.  */
5720 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
5721 vec_oprnds1
.quick_push (vec_oprnd1
);
	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kind of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
5730 vect_get_vec_defs (op0
, NULL_TREE
, stmt_info
, &vec_oprnds0
, NULL
,
5733 vect_get_vec_defs (op0
, op1
, stmt_info
, &vec_oprnds0
, &vec_oprnds1
,
5737 vect_get_vec_defs_for_stmt_copy (vinfo
, &vec_oprnds0
, &vec_oprnds1
);
5739 /* Arguments are ready. Create the new vector stmt. */
5740 stmt_vec_info new_stmt_info
= NULL
;
5741 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
5743 vop1
= vec_oprnds1
[i
];
5744 gassign
*new_stmt
= gimple_build_assign (vec_dest
, code
, vop0
, vop1
);
5745 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
5746 gimple_assign_set_lhs (new_stmt
, new_temp
);
5748 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
5750 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
5757 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
5759 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
5760 prev_stmt_info
= new_stmt_info
;
5763 vec_oprnds0
.release ();
5764 vec_oprnds1
.release ();
/* Function vectorizable_operation.

   Check if STMT_INFO performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_operation (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			stmt_vec_info *vec_stmt, slp_tree slp_node,
			stmt_vector_for_cost *cost_vec)
{
5785 tree op0
, op1
= NULL_TREE
, op2
= NULL_TREE
;
5787 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
5788 enum tree_code code
, orig_code
;
5789 machine_mode vec_mode
;
5793 bool target_support_p
;
5794 enum vect_def_type dt
[3]
5795 = {vect_unknown_def_type
, vect_unknown_def_type
, vect_unknown_def_type
};
5797 stmt_vec_info prev_stmt_info
;
5798 poly_uint64 nunits_in
;
5799 poly_uint64 nunits_out
;
5803 vec
<tree
> vec_oprnds0
= vNULL
;
5804 vec
<tree
> vec_oprnds1
= vNULL
;
5805 vec
<tree
> vec_oprnds2
= vNULL
;
5806 tree vop0
, vop1
, vop2
;
5807 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
5808 vec_info
*vinfo
= stmt_info
->vinfo
;
5810 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
5813 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
5817 /* Is STMT a vectorizable binary/unary operation? */
5818 gassign
*stmt
= dyn_cast
<gassign
*> (stmt_info
->stmt
);
5822 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
5825 orig_code
= code
= gimple_assign_rhs_code (stmt
);
  /* For pointer addition and subtraction, we should use the normal
     plus and minus for the vector operation.  */
5829 if (code
== POINTER_PLUS_EXPR
)
5831 if (code
== POINTER_DIFF_EXPR
)
5834 /* Support only unary or binary operations. */
5835 op_type
= TREE_CODE_LENGTH (code
);
5836 if (op_type
!= unary_op
&& op_type
!= binary_op
&& op_type
!= ternary_op
)
5838 if (dump_enabled_p ())
5839 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5840 "num. args = %d (not unary/binary/ternary op).\n",
5845 scalar_dest
= gimple_assign_lhs (stmt
);
5846 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
5850 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
5851 && !type_has_mode_precision_p (TREE_TYPE (scalar_dest
))
5852 /* Exception are bitwise binary operations. */
5853 && code
!= BIT_IOR_EXPR
5854 && code
!= BIT_XOR_EXPR
5855 && code
!= BIT_AND_EXPR
)
5857 if (dump_enabled_p ())
5858 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5859 "bit-precision arithmetic not supported.\n");
5863 op0
= gimple_assign_rhs1 (stmt
);
5864 if (!vect_is_simple_use (op0
, vinfo
, &dt
[0], &vectype
))
5866 if (dump_enabled_p ())
5867 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5868 "use not simple.\n");
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
5880 if (VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (op0
)))
5882 if (!VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (scalar_dest
)))
5884 if (dump_enabled_p ())
5885 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5886 "not supported operation on bool value.\n");
5889 vectype
= vectype_out
;
5892 vectype
= get_same_sized_vectype (TREE_TYPE (op0
), vectype_out
);
5895 gcc_assert (vectype
);
5898 if (dump_enabled_p ())
5900 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5901 "no vectype for scalar type ");
5902 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
5904 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
5910 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
5911 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype
);
5912 if (maybe_ne (nunits_out
, nunits_in
))
5915 if (op_type
== binary_op
|| op_type
== ternary_op
)
5917 op1
= gimple_assign_rhs2 (stmt
);
5918 if (!vect_is_simple_use (op1
, vinfo
, &dt
[1]))
5920 if (dump_enabled_p ())
5921 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5922 "use not simple.\n");
5926 if (op_type
== ternary_op
)
5928 op2
= gimple_assign_rhs3 (stmt
);
5929 if (!vect_is_simple_use (op2
, vinfo
, &dt
[2]))
5931 if (dump_enabled_p ())
5932 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
5933 "use not simple.\n");
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node)
    ncopies = 1;
  else
5944 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
5946 gcc_assert (ncopies
>= 1);
5948 /* Shifts are handled in vectorizable_shift (). */
5949 if (code
== LSHIFT_EXPR
|| code
== RSHIFT_EXPR
|| code
== LROTATE_EXPR
5950 || code
== RROTATE_EXPR
)
  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
	  || (!vec_stmt && !vect_worthwhile_without_simd_p (vinfo, code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && !vect_worthwhile_without_simd_p (vinfo, code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      DUMP_VECT_SCOPE ("vectorizable_operation");
      vect_model_simple_cost (stmt_info, ncopies, dt, ndts, slp_node, cost_vec);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");
  /* POINTER_DIFF_EXPR has pointer arguments which are vectorized as
     vectors with unsigned elements, but the result is signed.  So, we
     need to compute the MINUS_EXPR into vectype temporary and
     VIEW_CONVERT_EXPR it into the final vectype_out result.  */
  tree vec_cvt_dest = NULL_TREE;
  if (orig_code == POINTER_DIFF_EXPR)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      vec_cvt_dest = vect_create_destination_var (scalar_dest, vectype_out);
    }
  /* Handle def.  */
  else
    vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
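  /* For example (illustrative only), for a scalar statement such as
       ptrdiff_t d = p - q;
     the MINUS_EXPR is carried out on vectors of unsigned pointer-sized
     elements (VECTYPE) and the result is then VIEW_CONVERT_EXPRed into the
     signed VECTYPE_OUT through VEC_CVT_DEST below.  */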
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load.  See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op)
	    vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
			       slp_node);
	  else if (op_type == ternary_op)
	    {
	      if (slp_node)
		{
		  auto_vec<tree> ops(3);
		  ops.quick_push (op0);
		  ops.quick_push (op1);
		  ops.quick_push (op2);
		  auto_vec<vec<tree> > vec_defs(3);
		  vect_get_slp_defs (ops, slp_node, &vec_defs);
		  vec_oprnds0 = vec_defs[0];
		  vec_oprnds1 = vec_defs[1];
		  vec_oprnds2 = vec_defs[2];
		}
	      else
		{
		  vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
				     &vec_oprnds1, NULL);
		  vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2,
				     NULL, NULL);
		}
	    }
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
			       slp_node);
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (vinfo, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (vinfo,
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      stmt_vec_info new_stmt_info = NULL;
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op) ? vec_oprnds2[i] : NULL_TREE);
	  gassign *new_stmt = gimple_build_assign (vec_dest, code,
						   vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  new_stmt_info
	    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	  if (vec_cvt_dest)
	    {
	      new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
	      gassign *new_stmt
		= gimple_build_assign (vec_cvt_dest, VIEW_CONVERT_EXPR,
				       new_temp);
	      new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      new_stmt_info
		= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	    }
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
      prev_stmt_info = new_stmt_info;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR_INFO's base alignment.  */

static void
ensure_base_align (dr_vec_info *dr_info)
{
  if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
    return;

  if (dr_info->base_misaligned)
    {
      tree base_decl = dr_info->base_decl;

      unsigned int align_base_to
	= DR_TARGET_ALIGNMENT (dr_info) * BITS_PER_UNIT;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (align_base_to);
      else
	{
	  SET_DECL_ALIGN (base_decl, align_base_to);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      dr_info->base_misaligned = false;
    }
}
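/* For example (illustrative only), if the target wants 16-byte aligned
   vector accesses but the underlying declaration - say a file-scope array -
   is only known to be 4-byte aligned, the code above raises DECL_ALIGN
   (or the symtab alignment for symbols with a symtab node) so that the
   reference can be treated as aligned instead of needing peeling or an
   unaligned access.  */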
/* Function get_group_alias_ptr_type.

   Return the alias type for the group starting at FIRST_STMT_INFO.  */

static tree
get_group_alias_ptr_type (stmt_vec_info first_stmt_info)
{
  struct data_reference *first_dr, *next_dr;

  first_dr = STMT_VINFO_DATA_REF (first_stmt_info);
  stmt_vec_info next_stmt_info = DR_GROUP_NEXT_ELEMENT (first_stmt_info);
  while (next_stmt_info)
    {
      next_dr = STMT_VINFO_DATA_REF (next_stmt_info);
      if (get_alias_set (DR_REF (first_dr))
	  != get_alias_set (DR_REF (next_dr)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "conflicting alias set types.\n");
	  return ptr_type_node;
	}
      next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
    }
  return reference_alias_ptr_type (DR_REF (first_dr));
}
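/* For instance (illustrative only), a group that interleaves accesses to an
   int member and to a float member of the same structure has conflicting
   alias sets, so the whole group is accessed through ptr_type_node (alias
   set 0) to stay conservatively correct.  */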
/* Function vectorizable_store.

   Check if STMT_INFO defines a non scalar data-ref (array/pointer/structure)
   that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		    stmt_vec_info *vec_stmt, slp_tree slp_node,
		    stmt_vector_for_cost *cost_vec)
{
  tree vec_oprnd = NULL_TREE;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  enum dr_alignment_support alignment_support_scheme;
  enum vect_def_type rhs_dt = vect_unknown_def_type;
  enum vect_def_type mask_dt = vect_unknown_def_type;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  stmt_vec_info first_stmt_info;
  unsigned int group_size, i;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gather_scatter_info gs_info;
  vec_load_store_type vls_type;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable store?  */

  tree mask = NULL_TREE, mask_vectype = NULL_TREE;
  if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
    {
      tree scalar_dest = gimple_assign_lhs (assign);
      if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
	  && is_pattern_stmt_p (stmt_info))
	scalar_dest = TREE_OPERAND (scalar_dest, 0);
      if (TREE_CODE (scalar_dest) != ARRAY_REF
	  && TREE_CODE (scalar_dest) != BIT_FIELD_REF
	  && TREE_CODE (scalar_dest) != INDIRECT_REF
	  && TREE_CODE (scalar_dest) != COMPONENT_REF
	  && TREE_CODE (scalar_dest) != IMAGPART_EXPR
	  && TREE_CODE (scalar_dest) != REALPART_EXPR
	  && TREE_CODE (scalar_dest) != MEM_REF)
	return false;
    }
  else
    {
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      if (!call || !gimple_call_internal_p (call))
	return false;

      internal_fn ifn = gimple_call_internal_fn (call);
      if (!internal_store_fn_p (ifn))
	return false;

      if (slp_node != NULL)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "SLP of masked stores not supported.\n");
	  return false;
	}

      int mask_index = internal_fn_mask_index (ifn);
      if (mask_index >= 0)
	{
	  mask = gimple_call_arg (call, mask_index);
	  if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
					   &mask_vectype))
	    return false;
	}
    }

  op = vect_get_store_rhs (stmt_info);
  /* Cannot have hybrid store SLP -- that would mean storing to the
     same location twice.  */
  gcc_assert (slp == PURE_SLP_STMT (stmt_info));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }
  if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type))
    return false;

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (mask)
    {
      if (memory_access_type == VMAT_CONTIGUOUS)
	{
	  if (!VECTOR_MODE_P (vec_mode)
	      || !can_vec_mask_load_store_p (vec_mode,
					     TYPE_MODE (mask_vectype), false))
	    return false;
	}
      else if (memory_access_type != VMAT_LOAD_STORE_LANES
	       && (memory_access_type != VMAT_GATHER_SCATTER || gs_info.decl))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported access type for masked store.\n");
	  return false;
	}
    }
  else
    {
      /* FORNOW. In some cases can vectorize even if data-type not supported
	 (e.g. - array initialization with 0).  */
      if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
	return false;
    }

  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
  grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
		   && memory_access_type != VMAT_GATHER_SCATTER
		   && (slp || memory_access_type != VMAT_CONTIGUOUS));
  if (grouped_store)
    {
      first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
      group_size = DR_GROUP_SIZE (first_stmt_info);
    }
  else
    {
      first_stmt_info = stmt_info;
      first_dr_info = dr_info;
      group_size = vec_num = 1;
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;

      if (loop_vinfo
	  && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
	check_load_store_masking (loop_vinfo, vectype, vls_type, group_size,
				  memory_access_type, &gs_info);

      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      vect_model_store_cost (stmt_info, ncopies, rhs_dt, memory_access_type,
			     vls_type, slp_node, cost_vec);
      return true;
    }
  gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  /* Transform.  */
  ensure_base_align (dr_info);

  if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      poly_uint64 scatter_off_nunits
	= TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);

      if (known_eq (nunits, scatter_off_nunits))
	modifier = NONE;
      else if (known_eq (nunits * 2, scatter_off_nunits))
	{
	  modifier = WIDEN;

	  /* Currently gathers and scatters are only supported for
	     fixed-length vectors.  */
	  unsigned int count = scatter_off_nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < (unsigned int) count; ++i)
	    sel.quick_push (i | (count / 2));

	  vec_perm_indices indices (sel, 1, count);
	  perm_mask = vect_gen_perm_mask_checked (gs_info.offset_vectype,
						  indices);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (known_eq (nunits, scatter_off_nunits * 2))
	{
	  modifier = NARROW;

	  /* Currently gathers and scatters are only supported for
	     fixed-length vectors.  */
	  unsigned int count = nunits.to_constant ();
	  vec_perm_builder sel (count, count, 1);
	  for (i = 0; i < (unsigned int) count; ++i)
	    sel.quick_push (i | (count / 2));

	  vec_perm_indices indices (sel, 2, count);
	  perm_mask = vect_gen_perm_mask_checked (vectype, indices);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();
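      /* Illustrative only: for a fixed-length case with COUNT == 8 the
	 selector built above is { 4, 5, 6, 7, 4, 5, 6, 7 }, i.e. every odd
	 copy feeds the high half of the offset (WIDEN) or data (NARROW)
	 vector into the scatter built-in.  */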
      rettype = TREE_TYPE (TREE_TYPE (gs_info.decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, gs_info.base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt_info, mask, masktype, NULL);

      scale = build_int_cst (scaletype, gs_info.scale);
      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (op, stmt_info);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (gs_info.offset, stmt_info);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt_info, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
					      stmt_info, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (vinfo, vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
				    TYPE_VECTOR_SUBPARTS (srctype)));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      gassign *new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
				    TYPE_VECTOR_SUBPARTS (idxtype)));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      gassign *new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	      op = var;
	    }

	  gcall *new_stmt
	    = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
	  stmt_vec_info new_stmt_info
	    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
	  prev_stmt_info = new_stmt_info;
	}
      return true;
    }
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    DR_GROUP_STORE_COUNT (DR_GROUP_FIRST_ELEMENT (stmt_info))++;

  if (grouped_store)
    {
      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (DR_GROUP_STORE_COUNT (first_stmt_info)
	  < DR_GROUP_SIZE (first_stmt_info)
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
	{
	  grouped_store = false;
	  /* VEC_NUM is the number of vect stmts to be created for this
	     group.  */
	  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  gcc_assert (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
		      == first_stmt_info);
	  first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
	  op = vect_get_store_rhs (first_stmt_info);
	}
      else
	/* VEC_NUM is the number of vect stmts to be created for this
	   group.  */
	vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt_info);
    }
  else
    ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform store. ncopies = %d\n", ncopies);

  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;
      /* Checked by get_load_store_type.  */
      unsigned int const_nunits = nunits.to_constant ();

      gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
      gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));

      stride_base
	= fold_build_pointer_plus
	    (DR_BASE_ADDRESS (first_dr_info->dr),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
			 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
	 */
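      /* As a concrete (illustrative) instance, with const_nunits == 4 and a
	 scalar stride of 3 each vector of the rhs is scattered as
	   array[j] = vectemp[0];   array[j+3] = vectemp[1];
	   array[j+6] = vectemp[2]; array[j+9] = vectemp[3];
	 with RUNNING_OFF bumped by STRIDE_STEP (a POINTER_PLUS_EXPR)
	 after each element store.  */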
      unsigned nstores = const_nunits;
      unsigned lnel = 1;
      tree ltype = elem_type;
      tree lvectype = vectype;
      if (slp)
	{
	  if (group_size < const_nunits
	      && const_nunits % group_size == 0)
	    {
	      nstores = const_nunits / group_size;
	      lnel = group_size;
	      ltype = build_vector_type (elem_type, group_size);
	      lvectype = vectype;

	      /* First check if vec_extract optab doesn't support extraction
		 of vector elts directly.  */
	      scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
	      machine_mode vmode;
	      if (!mode_for_vector (elmode, group_size).exists (&vmode)
		  || !VECTOR_MODE_P (vmode)
		  || !targetm.vector_mode_supported_p (vmode)
		  || (convert_optab_handler (vec_extract_optab,
					     TYPE_MODE (vectype), vmode)
		      == CODE_FOR_nothing))
		{
		  /* Try to avoid emitting an extract of vector elements
		     by performing the extracts using an integer type of the
		     same size, extracting from a vector of those and then
		     re-interpreting it as the original vector type if
		     supported.  */
		  unsigned lsize
		    = group_size * GET_MODE_BITSIZE (elmode);
		  elmode = int_mode_for_size (lsize, 0).require ();
		  unsigned int lnunits = const_nunits / group_size;
		  /* If we can't construct such a vector fall back to
		     element extracts from the original vector type and
		     element size stores.  */
		  if (mode_for_vector (elmode, lnunits).exists (&vmode)
		      && VECTOR_MODE_P (vmode)
		      && targetm.vector_mode_supported_p (vmode)
		      && (convert_optab_handler (vec_extract_optab,
						 vmode, elmode)
			  != CODE_FOR_nothing))
		    {
		      nstores = lnunits;
		      lnel = group_size;
		      ltype = build_nonstandard_integer_type (lsize, 1);
		      lvectype = build_vector_type (ltype, nstores);
		    }
		  /* Else fall back to vector extraction anyway.
		     Fewer stores are more important than avoiding spilling
		     of the vector we extract from.  Compared to the
		     construction case in vectorizable_load no store-forwarding
		     issue exists here for reasonable archs.  */
		}
	    }
	  else if (group_size >= const_nunits
		   && group_size % const_nunits == 0)
	    {
	      nstores = 1;
	      lnel = const_nunits;
	      ltype = vectype;
	      lvectype = vectype;
	    }
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	}
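      /* Illustrative only: storing interleaved pairs (group_size == 2) out
	 of a V8SImode vector would prefer, when the target allows it,
	 viewing the vector as V4DImode and issuing four DImode stores,
	 rather than eight SImode element extracts and stores.  */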
      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
      ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      loop_vinfo->add_stmt (incr);

      stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (ref_type, 0);
      stmt_vec_info next_stmt_info = first_stmt_info;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt_info, incr, gsi);
	      running_off = newoff;
	    }
	  unsigned int group_el = 0;
	  unsigned HOST_WIDE_INT
	    elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from vect_get_store_rhs,
		 and first_stmt_info == stmt_info.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt_info,
					 &vec_oprnds, NULL, slp_node);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      op = vect_get_store_rhs (next_stmt_info);
		      vec_oprnd = vect_get_vec_def_for_operand
			(op, next_stmt_info);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo,
								vec_oprnd);
		}
	      /* Pun the vector to extract from if necessary.  */
	      if (lvectype != vectype)
		{
		  tree tem = make_ssa_name (lvectype);
		  gimple *pun
		    = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
							lvectype, vec_oprnd));
		  vect_finish_stmt_generation (stmt_info, pun, gsi);
		  vec_oprnd = tem;
		}
	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  tree this_off = build_int_cst (TREE_TYPE (alias_off),
						 group_el * elsz);
		  newref = build2 (MEM_REF, ltype,
				   running_off, this_off);
		  vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  stmt_vec_info assign_info
		    = vect_finish_stmt_generation (stmt_info, assign, gsi);

		  group_el += lnel;
		  if (! slp
		      || group_el == group_size)
		    {
		      newoff = copy_ssa_name (running_off, NULL);
		      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						  running_off, stride_step);
		      vect_finish_stmt_generation (stmt_info, incr, gsi);

		      running_off = newoff;
		      group_el = 0;
		    }
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			  = *vec_stmt = assign_info;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign_info;
		      prev_stmt_info = assign_info;
		    }
		}
	    }
	  next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
	  if (slp)
	    break;
	}

      vec_oprnds.release ();
      return true;
    }
  auto_vec<tree> dr_chain (group_size);
  oprnds.create (group_size);

  alignment_support_scheme
    = vect_supportable_dr_alignment (first_dr_info, false);
  gcc_assert (alignment_support_scheme);
  vec_loop_masks *loop_masks
    = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
       ? &LOOP_VINFO_MASKS (loop_vinfo)
       : NULL);
  /* Targets with store-lane instructions must not require explicit
     realignment.  vect_supportable_dr_alignment always returns either
     dr_aligned or dr_unaligned_supported for masked operations.  */
  gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
	       && !mask
	       && !loop_masks)
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (memory_access_type == VMAT_CONTIGUOUS_DOWN
      || memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  tree bump;
  tree vec_offset = NULL_TREE;
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      aggr_type = NULL_TREE;
      bump = NULL_TREE;
    }
  else if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      aggr_type = elem_type;
      vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
				       &bump, &vec_offset);
    }
  else
    {
      if (memory_access_type == VMAT_LOAD_STORE_LANES)
	aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
      else
	aggr_type = vectype;
      bump = vect_get_data_ptr_increment (dr_info, aggr_type,
					  memory_access_type);
    }

  if (mask)
    LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
        VS2: &base + vec_size*1 = vx0
        VS3: &base + vec_size*2 = vx1
        VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
  prev_stmt_info = NULL;
  tree vec_mask = NULL_TREE;
  for (j = 0; j < ncopies; j++)
    {
      stmt_vec_info new_stmt_info;
      if (j == 0)
	{
	  if (slp)
	    {
	      /* Get vectorized arguments for SLP_NODE.  */
	      vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds,
				 NULL, slp_node);

	      vec_oprnd = vec_oprnds[0];
	    }
	  else
	    {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      stmt_vec_info next_stmt_info = first_stmt_info;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     DR_GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT_INFO can't be NULL_TREE.  In case
		     that there is no interleaving, DR_GROUP_SIZE is 1,
		     and only one iteration of the loop will be executed.  */
		  op = vect_get_store_rhs (next_stmt_info);
		  vec_oprnd = vect_get_vec_def_for_operand
		    (op, next_stmt_info);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
		}
	      if (mask)
		vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
							 mask_vectype);
	    }

	  /* We should have catched mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr_info->dr))
	      && integer_zerop (DR_INIT (first_dr_info->dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (TREE_TYPE (ref_type))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
	      dataref_offset = build_int_cst (ref_type, 0);
	    }
	  else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
	    vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
					 &dataref_ptr, &vec_offset);
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt_info, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, NULL_TREE, bump);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (vinfo, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (mask)
	    vec_mask = vect_get_vec_def_for_stmt_copy (vinfo, vec_mask);
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset, bump);
	  else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
	    vec_offset = vect_get_vec_def_for_stmt_copy (vinfo, vec_offset);
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					   stmt_info, bump);
	}
      if (memory_access_type == VMAT_LOAD_STORE_LANES)
	{
	  tree vec_array;

	  /* Get an array into which we can store the individual vectors.  */
	  vec_array = create_vector_array (vectype, vec_num);

	  /* Invalidate the current contents of VEC_ARRAY.  This should
	     become an RTL clobber too, which prevents the vector registers
	     from being upward-exposed.  */
	  vect_clobber_variable (stmt_info, gsi, vec_array);

	  /* Store the individual vectors into the array.  */
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i);
	    }

	  tree final_mask = NULL;
	  if (loop_masks)
	    final_mask = vect_get_loop_mask (gsi, loop_masks, ncopies,
					     vectype, j);
	  if (vec_mask)
	    final_mask = prepare_load_store_mask (mask_vectype, final_mask,
						  vec_mask, gsi);

	  gcall *call;
	  if (final_mask)
	    {
	      /* Emit:
		   MASK_STORE_LANES (DATAREF_PTR, ALIAS_PTR, VEC_MASK,
				     VEC_ARRAY).  */
	      unsigned int align = TYPE_ALIGN_UNIT (TREE_TYPE (vectype));
	      tree alias_ptr = build_int_cst (ref_type, align);
	      call = gimple_build_call_internal (IFN_MASK_STORE_LANES, 4,
						 dataref_ptr, alias_ptr,
						 final_mask, vec_array);
	    }
	  else
	    {
	      /* Emit:
		   MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	      data_ref = create_array_ref (aggr_type, dataref_ptr, ref_type);
	      call = gimple_build_call_internal (IFN_STORE_LANES, 1,
						 vec_array);
	      gimple_call_set_lhs (call, data_ref);
	    }
	  gimple_call_set_nothrow (call, true);
	  new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);

	  /* Record that VEC_ARRAY is now dead.  */
	  vect_clobber_variable (stmt_info, gsi, vec_array);
	}
      else
	{
	  new_stmt_info = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi,
					&result_chain);
	    }

	  stmt_vec_info next_stmt_info = first_stmt_info;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      tree final_mask = NULL_TREE;
	      if (loop_masks)
		final_mask = vect_get_loop_mask (gsi, loop_masks,
						 vec_num * ncopies,
						 vectype, vec_num * j + i);
	      if (vec_mask)
		final_mask = prepare_load_store_mask (mask_vectype, final_mask,
						      vec_mask, gsi);

	      if (memory_access_type == VMAT_GATHER_SCATTER)
		{
		  tree scale = size_int (gs_info.scale);
		  gcall *call;
		  if (final_mask)
		    call = gimple_build_call_internal
		      (IFN_MASK_SCATTER_STORE, 5, dataref_ptr, vec_offset,
		       scale, vec_oprnd, final_mask);
		  else
		    call = gimple_build_call_internal
		      (IFN_SCATTER_STORE, 4, dataref_ptr, vec_offset,
		       scale, vec_oprnd);
		  gimple_call_set_nothrow (call, true);
		  new_stmt_info
		    = vect_finish_stmt_generation (stmt_info, call, gsi);
		  break;
		}

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt_info, bump);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      align = DR_TARGET_ALIGNMENT (first_dr_info);
	      if (aligned_access_p (first_dr_info))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr_info) == -1)
		{
		  align = dr_alignment (vect_dr_behavior (first_dr_info));
		  misalign = 0;
		}
	      else
		misalign = DR_MISALIGNMENT (first_dr_info);
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest = vect_create_destination_var
		    (vect_get_store_rhs (stmt_info), vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      if (final_mask)
		{
		  align = least_bit_hwi (misalign | align);
		  tree ptr = build_int_cst (ref_type, align);
		  gcall *call
		    = gimple_build_call_internal (IFN_MASK_STORE, 4,
						  dataref_ptr, ptr,
						  final_mask, vec_oprnd);
		  gimple_call_set_nothrow (call, true);
		  new_stmt_info
		    = vect_finish_stmt_generation (stmt_info, call, gsi);
		}
	      else
		{
		  data_ref = fold_build2 (MEM_REF, vectype,
					  dataref_ptr,
					  dataref_offset
					  ? dataref_offset
					  : build_int_cst (ref_type, 0));
		  if (aligned_access_p (first_dr_info))
		    ;
		  else if (DR_MISALIGNMENT (first_dr_info) == -1)
		    TREE_TYPE (data_ref)
		      = build_aligned_type (TREE_TYPE (data_ref),
					    align * BITS_PER_UNIT);
		  else
		    TREE_TYPE (data_ref)
		      = build_aligned_type (TREE_TYPE (data_ref),
					    TYPE_ALIGN (elem_type));
		  vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
		  gassign *new_stmt
		    = gimple_build_assign (data_ref, vec_oprnd);
		  new_stmt_info
		    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
		}

	      if (slp)
		continue;

	      next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
	      if (!next_stmt_info)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
	  prev_stmt_info = new_stmt_info;
	}
    }

  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_const_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const vec_perm_indices &sel)
{
  tree mask_type;

  poly_uint64 nunits = sel.length ();
  gcc_assert (known_eq (nunits, TYPE_VECTOR_SUBPARTS (vectype)));

  mask_type = build_vector_type (ssizetype, nunits);
  return vec_perm_indices_to_tree (mask_type, sel);
}

/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_const_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const vec_perm_indices &sel)
{
  gcc_assert (can_vec_perm_const_p (TYPE_MODE (vectype), sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
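/* For instance (illustrative only), a reversing permutation for a
   4-element vector can be built as

     vec_perm_builder sel (4, 4, 1);
     for (unsigned int k = 0; k < 4; ++k)
       sel.quick_push (3 - k);
     vec_perm_indices indices (sel, 1, 4);
     tree mask = vect_gen_perm_mask_checked (vectype, indices);

   which yields the VECTOR_CST { 3, 2, 1, 0 } of ssizetype elements.  */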
/* Given a vector variable X and Y, that was generated for the scalar
   STMT_INFO, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
  if (TREE_CODE (scalar_dest) == SSA_NAME)
    perm_dest = vect_create_destination_var (scalar_dest, vectype);
  else
    perm_dest = vect_get_new_vect_var (vectype, vect_simple_var, NULL);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT_INFO out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT_INFO can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt_info->stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
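/* For instance (illustrative only), for a loop-invariant load such as

     for (i = 0; i < n; ++i)
       a[i] = *p + x;

   where *p does not change in the loop, any statements computing the
   operands of the load that happen to sit inside the loop body are moved
   to the preheader edge here, so that the load itself can then be hoisted
   by vectorizable_load's VMAT_INVARIANT handling.  */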
/* vectorizable_load.

   Check if STMT_INFO reads a non scalar data-ref (array/pointer/structure)
   that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return true if STMT_INFO is vectorizable in this way.  */

static bool
vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		   stmt_vec_info *vec_stmt, slp_tree slp_node,
		   slp_instance slp_node_instance,
		   stmt_vector_for_cost *cost_vec)
{
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
  bool nested_in_vect_loop = false;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  unsigned int group_size;
  poly_uint64 group_gap_adj;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree byte_offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  stmt_vec_info first_stmt_info;
  stmt_vec_info first_stmt_info_for_drptr = NULL;
  bool compute_in_loop = false;
  struct loop *at_loop;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  gather_scatter_info gs_info;
  vec_info *vinfo = stmt_info->vinfo;
  enum vect_def_type mask_dt = vect_unknown_def_type;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  tree mask = NULL_TREE, mask_vectype = NULL_TREE;
  if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
    {
      scalar_dest = gimple_assign_lhs (assign);
      if (TREE_CODE (scalar_dest) != SSA_NAME)
	return false;

      tree_code code = gimple_assign_rhs_code (assign);
      if (code != ARRAY_REF
	  && code != BIT_FIELD_REF
	  && code != INDIRECT_REF
	  && code != COMPONENT_REF
	  && code != IMAGPART_EXPR
	  && code != REALPART_EXPR
	  && code != MEM_REF
	  && TREE_CODE_CLASS (code) != tcc_declaration)
	return false;
    }
  else
    {
      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      if (!call || !gimple_call_internal_p (call))
	return false;

      internal_fn ifn = gimple_call_internal_fn (call);
      if (!internal_load_fn_p (ifn))
	return false;

      scalar_dest = gimple_call_lhs (call);
      if (!scalar_dest)
	return false;

      if (slp_node != NULL)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "SLP of masked loads not supported.\n");
	  return false;
	}

      int mask_index = internal_fn_mask_index (ifn);
      if (mask_index >= 0)
	{
	  mask = gimple_call_arg (call, mask_index);
	  if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
					   &mask_vectype))
	    return false;
	}
    }
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp)
    ncopies = 1;
  else
    ncopies = vect_get_num_copies (loop_vinfo, vectype);

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
		   STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cannot perform implicit CSE when unrolling "
			 "with negative dependence distance\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Aligned load, but unsupported type.\n");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop);
      gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      group_size = DR_GROUP_SIZE (first_stmt_info);

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
	slp_perm = true;

      /* Invalidate assumptions made by dependence analysis when vectorization
	 on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
	  && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
	  && maybe_gt (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
		       STMT_VINFO_MIN_NEG_DIST (stmt_info)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "cannot perform implicit CSE when performing "
			     "group loads with negative dependence distance\n");
	  return false;
	}

      /* Similarly when the stmt is a load that is both part of a SLP
	 instance and a loop vectorized stmt via the same-dr mechanism
	 we have to give up.  */
      if (DR_GROUP_SAME_DR_STMT (stmt_info)
	  && (STMT_SLP_TYPE (stmt_info)
	      != STMT_SLP_TYPE (DR_GROUP_SAME_DR_STMT (stmt_info))))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "conflicting SLP types for CSEd load\n");
	  return false;
	}
    }
  else
    group_size = 1;
  vect_memory_access_type memory_access_type;
  if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies,
			    &memory_access_type, &gs_info))
    return false;

  if (mask)
    {
      if (memory_access_type == VMAT_CONTIGUOUS)
	{
	  machine_mode vec_mode = TYPE_MODE (vectype);
	  if (!VECTOR_MODE_P (vec_mode)
	      || !can_vec_mask_load_store_p (vec_mode,
					     TYPE_MODE (mask_vectype), true))
	    return false;
	}
      else if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
	{
	  tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info.decl));
	  tree masktype
	    = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
	  if (TREE_CODE (masktype) == INTEGER_TYPE)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "masked gather with integer mask not"
				 " supported.");
	      return false;
	    }
	}
      else if (memory_access_type != VMAT_LOAD_STORE_LANES
	       && memory_access_type != VMAT_GATHER_SCATTER)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unsupported access type for masked load.\n");
	  return false;
	}
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      if (!slp)
	STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;

      if (loop_vinfo
	  && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
	check_load_store_masking (loop_vinfo, vectype, VLS_LOAD, group_size,
				  memory_access_type, &gs_info);

      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      vect_model_load_cost (stmt_info, ncopies, memory_access_type,
			    slp_node_instance, slp_node, cost_vec);
      return true;
    }

  if (!slp)
    gcc_assert (memory_access_type
		== STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform load. ncopies = %d\n", ncopies);
  /* Transform.  */

  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
  ensure_base_align (dr_info);

  if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
    {
      vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask);
      return true;
    }
  if (memory_access_type == VMAT_INVARIANT)
    {
      gcc_assert (!grouped_load && !mask && !bb_vinfo);
      /* If we have versioned for aliasing or the loop doesn't
	 have any data dependencies that would preclude this,
	 then we are sure this is a loop invariant load and
	 thus we can insert it on the preheader edge.  */
      bool hoist_p = (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
		      && !nested_in_vect_loop
		      && hoist_defs_of_uses (stmt_info, loop));
      if (hoist_p)
	{
	  gassign *stmt = as_a <gassign *> (stmt_info->stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "hoisting out of the vectorized loop: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	  scalar_dest = copy_ssa_name (scalar_dest);
	  tree rhs = unshare_expr (gimple_assign_rhs1 (stmt));
	  gsi_insert_on_edge_immediate
	    (loop_preheader_edge (loop),
	     gimple_build_assign (scalar_dest, rhs));
	}
      /* These copies are all equivalent, but currently the representation
	 requires a separate STMT_VINFO_VEC_STMT for each one.  */
      prev_stmt_info = NULL;
      gimple_stmt_iterator gsi2 = *gsi;
      gsi_next (&gsi2);
      for (j = 0; j < ncopies; j++)
	{
	  stmt_vec_info new_stmt_info;
	  if (hoist_p)
	    {
	      new_temp = vect_init_vector (stmt_info, scalar_dest,
					   vectype, NULL);
	      gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
	      new_stmt_info = vinfo->add_stmt (new_stmt);
	    }
	  else
	    {
	      new_temp = vect_init_vector (stmt_info, scalar_dest,
					   vectype, &gsi2);
	      new_stmt_info = vinfo->lookup_def (new_temp);
	    }
	  if (slp)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	  else if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
	  prev_stmt_info = new_stmt_info;
	}
      return true;
    }
  if (memory_access_type == VMAT_ELEMENTWISE
      || memory_access_type == VMAT_STRIDED_SLP)
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      tree stride_base, stride_step, alias_off;
      /* Checked by get_load_store_type.  */
      unsigned int const_nunits = nunits.to_constant ();
      unsigned HOST_WIDE_INT cst_offset = 0;

      gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
      gcc_assert (!nested_in_vect_loop);

      if (grouped_load)
	{
	  first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
	  first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
	}
      else
	{
	  first_stmt_info = stmt_info;
	  first_dr_info = dr_info;
	}
      if (slp && grouped_load)
	{
	  group_size = DR_GROUP_SIZE (first_stmt_info);
	  ref_type = get_group_alias_ptr_type (first_stmt_info);
	}
      else
	{
	  if (grouped_load)
	    cst_offset
	      = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
		 * vect_get_place_in_interleaving_chain (stmt_info,
							 first_stmt_info));
	  group_size = 1;
	  ref_type = reference_alias_ptr_type (DR_REF (dr_info->dr));
	}

      stride_base
	= fold_build_pointer_plus
	    (DR_BASE_ADDRESS (first_dr_info->dr),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (DR_OFFSET (first_dr_info->dr)),
			 convert_to_ptrofftype (DR_INIT (first_dr_info->dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr_info->dr));
      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
	     ...  */
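      /* As a concrete (illustrative) instance, with const_nunits == 4 and a
	 scalar stride of 3, each copy below performs four scalar loads
	   tmp1 = array[j];   tmp2 = array[j+3];
	   tmp3 = array[j+6]; tmp4 = array[j+9];
	 and assembles them into one vector with a CONSTRUCTOR.  */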
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
			    build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      stride_base = cse_and_gimplify_to_preheader (loop_vinfo, stride_base);
      ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      loop_vinfo->add_stmt (incr);

      stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (ref_type, 0);
      int nloads = const_nunits;
      int lnel = 1;
      tree ltype = TREE_TYPE (vectype);
      tree lvectype = vectype;
      auto_vec<tree> dr_chain;
      if (memory_access_type == VMAT_STRIDED_SLP)
	{
	  if (group_size < const_nunits)
	    {
	      /* First check if vec_init optab supports construction from
		 vector elts directly.  */
	      scalar_mode elmode = SCALAR_TYPE_MODE (TREE_TYPE (vectype));
	      machine_mode vmode;
	      if (mode_for_vector (elmode, group_size).exists (&vmode)
		  && VECTOR_MODE_P (vmode)
		  && targetm.vector_mode_supported_p (vmode)
		  && (convert_optab_handler (vec_init_optab,
					     TYPE_MODE (vectype), vmode)
		      != CODE_FOR_nothing))
		{
		  nloads = const_nunits / group_size;
		  lnel = group_size;
		  ltype = build_vector_type (TREE_TYPE (vectype), group_size);
		}
	      else
		{
		  /* Otherwise avoid emitting a constructor of vector elements
		     by performing the loads using an integer type of the same
		     size, constructing a vector of those and then
		     re-interpreting it as the original vector type.
		     This avoids a huge runtime penalty due to the general
		     inability to perform store forwarding from smaller stores
		     to a larger load.  */
		  unsigned lsize
		    = group_size * TYPE_PRECISION (TREE_TYPE (vectype));
		  elmode = int_mode_for_size (lsize, 0).require ();
		  unsigned int lnunits = const_nunits / group_size;
		  /* If we can't construct such a vector fall back to
		     element loads of the original vector type.  */
		  if (mode_for_vector (elmode, lnunits).exists (&vmode)
		      && VECTOR_MODE_P (vmode)
		      && targetm.vector_mode_supported_p (vmode)
		      && (convert_optab_handler (vec_init_optab, vmode, elmode)
			  != CODE_FOR_nothing))
		    {
		      nloads = lnunits;
		      lnel = group_size;
		      ltype = build_nonstandard_integer_type (lsize, 1);
		      lvectype = build_vector_type (ltype, nloads);
		    }
		}
	    }
	  else
	    {
	      nloads = 1;
	      lnel = const_nunits;
	      ltype = vectype;
	    }
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
	}
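      /* Illustrative only: loading interleaved pairs (group_size == 2) of
	 SImode elements would rather build a V4DImode vector out of four
	 DImode loads and VIEW_CONVERT it to the original vector type than
	 issue eight SImode element loads, avoiding the store-forwarding
	 penalty mentioned above.  */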
      /* Load vector(1) scalar_type if it's 1 element-wise vectype.  */
      else if (nloads == 1)
	ltype = vectype;

      if (slp)
	{
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    {
	      /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
		 variable VF.  */
	      unsigned int const_vf = vf.to_constant ();
	      ncopies = CEIL (group_size * const_vf, const_nunits);
	      dr_chain.create (ncopies);
	    }
	  else
	    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	}
      unsigned int group_el = 0;
      unsigned HOST_WIDE_INT
	elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      for (j = 0; j < ncopies; j++)
	{
	  if (nloads > 1)
	    vec_alloc (v, nloads);
	  stmt_vec_info new_stmt_info = NULL;
	  for (i = 0; i < nloads; i++)
	    {
	      tree this_off = build_int_cst (TREE_TYPE (alias_off),
					     group_el * elsz + cst_offset);
	      tree data_ref = build2 (MEM_REF, ltype, running_off, this_off);
	      vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
	      gassign *new_stmt
		= gimple_build_assign (make_ssa_name (ltype), data_ref);
	      new_stmt_info
		= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
	      if (nloads > 1)
		CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
					gimple_assign_lhs (new_stmt));

	      group_el += lnel;
	      if (! slp
		  || group_el == group_size)
		{
		  tree newoff = copy_ssa_name (running_off);
		  gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						      running_off, stride_step);
		  vect_finish_stmt_generation (stmt_info, incr, gsi);

		  running_off = newoff;
		  group_el = 0;
		}
	    }
	  if (nloads > 1)
	    {
	      tree vec_inv = build_constructor (lvectype, v);
	      new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi);
	      new_stmt_info = vinfo->lookup_def (new_temp);
	      if (lvectype != vectype)
		{
		  gassign *new_stmt
		    = gimple_build_assign (make_ssa_name (vectype),
					   VIEW_CONVERT_EXPR,
					   build1 (VIEW_CONVERT_EXPR,
						   vectype, new_temp));
		  new_stmt_info
		    = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
		}
	    }

	  if (slp)
	    {
	      if (slp_perm)
		dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt));
	      else
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
	      prev_stmt_info = new_stmt_info;
	    }
	}
      if (slp_perm)
	{
	  unsigned n_perms;
	  vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
					slp_node_instance, false, &n_perms);
	}
      return true;
    }
  if (memory_access_type == VMAT_GATHER_SCATTER
      || (!slp && memory_access_type == VMAT_CONTIGUOUS))
    grouped_load = false;

      first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
      group_size = DR_GROUP_SIZE (first_stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
	 without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
	first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
	 the data ref pointer on.  */
	first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (first_stmt_info)
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ??? But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ??? With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */

	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

      first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
      /* VEC_NUM is the number of vect stmts to be created for this group.  */
	  grouped_load = false;
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	      /* We don't yet generate SLP_TREE_LOAD_PERMUTATIONs for
		 variable VF.  */
	      unsigned int const_vf = vf.to_constant ();
	      unsigned int const_nunits = nunits.to_constant ();
	      vec_num = CEIL (group_size * const_vf, const_nunits);
	      group_gap_adj = vf * group_size - nunits * vec_num;
	    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	    group_gap_adj
	      = group_size - SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
	vec_num = group_size;

      ref_type = get_group_alias_ptr_type (first_stmt_info);

      first_stmt_info = stmt_info;
      first_dr_info = dr_info;
      group_size = vec_num = 1;
      ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));

  alignment_support_scheme
    = vect_supportable_dr_alignment (first_dr_info, false);
  gcc_assert (alignment_support_scheme);
  vec_loop_masks *loop_masks
    = (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
       ? &LOOP_VINFO_MASKS (loop_vinfo)
       : NULL);

  /* Targets with store-lane instructions must not require explicit
     realignment.  vect_supportable_dr_alignment always returns either
     dr_aligned or dr_unaligned_supported for masked operations.  */
  gcc_assert ((memory_access_type != VMAT_LOAD_STORE_LANES
	       && !mask
	       && !loop_masks)
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e. we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0

     See the documentation of vect_get_vec_def_for_stmt_copy for how the
     information recorded in the RELATED_STMT field is used to vectorize
     the stmt copies.  */
  /* In case of interleaving (non-unit grouped access):

        S1:  x2 = &base + 2
        S2:  x0 = &base
        S3:  x1 = &base + 1
        S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

        VS1: vx0 = &base
        VS2: vx1 = &base + vec_size*1
        VS3: vx3 = &base + vec_size*2
        VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
          ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
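  /* A concrete illustration (not part of the original comment): for the
     scalar accesses

        x0 = a[2*i];   x1 = a[2*i+1];

     with a 4-element vectype, one vector iteration covers a[0]..a[7], so
     two contiguous vector loads plus the permutations above give

        vx0 = a[0..3]
        vx1 = a[4..7]
        vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, 4, 6 } >    (even elements)
        vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, 5, 7 } >    (odd elements)

     where vx5 holds the four x0 values and vx6 the four x1 values.  */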
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:
       loop {
	 p = p + indx * vectype_size;
	 vec_dest = *(p);
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:
       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       loop {
	 p2 = p2 + indx * vectype_size
	 lsq = *(floor(p2))
	 vec_dest = realign_load (msq, lsq, realignment_token)
	 msq = lsq;
       }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
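  /* Hedged example (not from the original sources) of when the misalignment
     cannot be computed in the preheader: when vectorizing the outer loop of

        for (i = 0; i < n; i++)        <- outer loop, being vectorized
          for (j = 0; j < m; j++)      <- inner loop
            ... = a[i * step + j];

     the inner-loop data reference advances by STEP elements per outer
     iteration; if STEP times the element size is not a multiple of the
     vector size, the misalignment differs between outer iterations, so the
     init address and permutation mask must be recomputed inside the loop.  */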
  if (nested_in_vect_loop
      && !multiple_p (DR_STEP_ALIGNMENT (dr_info->dr),
		      GET_MODE_SIZE (TYPE_MODE (vectype))))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
      msq = vect_setup_realignment (first_stmt_info, gsi, &realignment_token,
				    alignment_support_scheme, NULL_TREE,
				    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
	{
	  phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
	  byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
				    size_one_node);
	}

  if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);
8187 tree vec_offset
= NULL_TREE
;
8188 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
8190 aggr_type
= NULL_TREE
;
8193 else if (memory_access_type
== VMAT_GATHER_SCATTER
)
8195 aggr_type
= elem_type
;
8196 vect_get_strided_load_store_ops (stmt_info
, loop_vinfo
, &gs_info
,
8197 &bump
, &vec_offset
);
8201 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
8202 aggr_type
= build_array_type_nelts (elem_type
, vec_num
* nunits
);
8204 aggr_type
= vectype
;
8205 bump
= vect_get_data_ptr_increment (dr_info
, aggr_type
,
8206 memory_access_type
);
8209 tree vec_mask
= NULL_TREE
;
8210 prev_stmt_info
= NULL
;
8211 poly_uint64 group_elt
= 0;
8212 for (j
= 0; j
< ncopies
; j
++)
8214 stmt_vec_info new_stmt_info
= NULL
;
8215 /* 1. Create the vector or array pointer update chain. */
8218 bool simd_lane_access_p
8219 = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info
);
8220 if (simd_lane_access_p
8221 && TREE_CODE (DR_BASE_ADDRESS (first_dr_info
->dr
)) == ADDR_EXPR
8222 && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info
->dr
), 0))
8223 && integer_zerop (DR_OFFSET (first_dr_info
->dr
))
8224 && integer_zerop (DR_INIT (first_dr_info
->dr
))
8225 && alias_sets_conflict_p (get_alias_set (aggr_type
),
8226 get_alias_set (TREE_TYPE (ref_type
)))
8227 && (alignment_support_scheme
== dr_aligned
8228 || alignment_support_scheme
== dr_unaligned_supported
))
8230 dataref_ptr
= unshare_expr (DR_BASE_ADDRESS (first_dr_info
->dr
));
8231 dataref_offset
= build_int_cst (ref_type
, 0);
8233 else if (first_stmt_info_for_drptr
8234 && first_stmt_info
!= first_stmt_info_for_drptr
)
8237 = vect_create_data_ref_ptr (first_stmt_info_for_drptr
,
8238 aggr_type
, at_loop
, offset
, &dummy
,
8239 gsi
, &ptr_incr
, simd_lane_access_p
,
8241 /* Adjust the pointer by the difference to first_stmt. */
8242 data_reference_p ptrdr
8243 = STMT_VINFO_DATA_REF (first_stmt_info_for_drptr
);
8245 = fold_convert (sizetype
,
8246 size_binop (MINUS_EXPR
,
8247 DR_INIT (first_dr_info
->dr
),
8249 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8252 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
8253 vect_get_gather_scatter_ops (loop
, stmt_info
, &gs_info
,
8254 &dataref_ptr
, &vec_offset
);
8257 = vect_create_data_ref_ptr (first_stmt_info
, aggr_type
, at_loop
,
8258 offset
, &dummy
, gsi
, &ptr_incr
,
8262 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt_info
,
8268 dataref_offset
= int_const_binop (PLUS_EXPR
, dataref_offset
,
8270 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
8271 vec_offset
= vect_get_vec_def_for_stmt_copy (vinfo
, vec_offset
);
8273 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8276 vec_mask
= vect_get_vec_def_for_stmt_copy (vinfo
, vec_mask
);
8279 if (grouped_load
|| slp_perm
)
8280 dr_chain
.create (vec_num
);
8282 if (memory_access_type
== VMAT_LOAD_STORE_LANES
)
8286 vec_array
= create_vector_array (vectype
, vec_num
);
8288 tree final_mask
= NULL_TREE
;
8290 final_mask
= vect_get_loop_mask (gsi
, loop_masks
, ncopies
,
8293 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
8300 VEC_ARRAY = MASK_LOAD_LANES (DATAREF_PTR, ALIAS_PTR,
8302 unsigned int align
= TYPE_ALIGN_UNIT (TREE_TYPE (vectype
));
8303 tree alias_ptr
= build_int_cst (ref_type
, align
);
8304 call
= gimple_build_call_internal (IFN_MASK_LOAD_LANES
, 3,
8305 dataref_ptr
, alias_ptr
,
8311 VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]). */
8312 data_ref
= create_array_ref (aggr_type
, dataref_ptr
, ref_type
);
8313 call
= gimple_build_call_internal (IFN_LOAD_LANES
, 1, data_ref
);
8315 gimple_call_set_lhs (call
, vec_array
);
8316 gimple_call_set_nothrow (call
, true);
8317 new_stmt_info
= vect_finish_stmt_generation (stmt_info
, call
, gsi
);
8319 /* Extract each vector into an SSA_NAME. */
8320 for (i
= 0; i
< vec_num
; i
++)
8322 new_temp
= read_vector_array (stmt_info
, gsi
, scalar_dest
,
8324 dr_chain
.quick_push (new_temp
);
8327 /* Record the mapping between SSA_NAMEs and statements. */
8328 vect_record_grouped_load_vectors (stmt_info
, dr_chain
);
8330 /* Record that VEC_ARRAY is now dead. */
8331 vect_clobber_variable (stmt_info
, gsi
, vec_array
);
8335 for (i
= 0; i
< vec_num
; i
++)
8337 tree final_mask
= NULL_TREE
;
8339 && memory_access_type
!= VMAT_INVARIANT
)
8340 final_mask
= vect_get_loop_mask (gsi
, loop_masks
,
8342 vectype
, vec_num
* j
+ i
);
8344 final_mask
= prepare_load_store_mask (mask_vectype
, final_mask
,
8348 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8351 /* 2. Create the vector-load in the loop. */
8352 gimple
*new_stmt
= NULL
;
8353 switch (alignment_support_scheme
)
8356 case dr_unaligned_supported
:
8358 unsigned int align
, misalign
;
8360 if (memory_access_type
== VMAT_GATHER_SCATTER
)
8362 tree scale
= size_int (gs_info
.scale
);
8365 call
= gimple_build_call_internal
8366 (IFN_MASK_GATHER_LOAD
, 4, dataref_ptr
,
8367 vec_offset
, scale
, final_mask
);
8369 call
= gimple_build_call_internal
8370 (IFN_GATHER_LOAD
, 3, dataref_ptr
,
8372 gimple_call_set_nothrow (call
, true);
8374 data_ref
= NULL_TREE
;
8378 align
= DR_TARGET_ALIGNMENT (dr_info
);
8379 if (alignment_support_scheme
== dr_aligned
)
8381 gcc_assert (aligned_access_p (first_dr_info
));
8384 else if (DR_MISALIGNMENT (first_dr_info
) == -1)
8386 align
= dr_alignment
8387 (vect_dr_behavior (first_dr_info
));
8391 misalign
= DR_MISALIGNMENT (first_dr_info
);
8392 if (dataref_offset
== NULL_TREE
8393 && TREE_CODE (dataref_ptr
) == SSA_NAME
)
8394 set_ptr_info_alignment (get_ptr_info (dataref_ptr
),
8399 align
= least_bit_hwi (misalign
| align
);
8400 tree ptr
= build_int_cst (ref_type
, align
);
8402 = gimple_build_call_internal (IFN_MASK_LOAD
, 3,
8405 gimple_call_set_nothrow (call
, true);
8407 data_ref
= NULL_TREE
;
8412 = fold_build2 (MEM_REF
, vectype
, dataref_ptr
,
8415 : build_int_cst (ref_type
, 0));
8416 if (alignment_support_scheme
== dr_aligned
)
8418 else if (DR_MISALIGNMENT (first_dr_info
) == -1)
8419 TREE_TYPE (data_ref
)
8420 = build_aligned_type (TREE_TYPE (data_ref
),
8421 align
* BITS_PER_UNIT
);
8423 TREE_TYPE (data_ref
)
8424 = build_aligned_type (TREE_TYPE (data_ref
),
8425 TYPE_ALIGN (elem_type
));
8429 case dr_explicit_realign
:
8433 tree vs
= size_int (TYPE_VECTOR_SUBPARTS (vectype
));
8435 if (compute_in_loop
)
8436 msq
= vect_setup_realignment (first_stmt_info
, gsi
,
8438 dr_explicit_realign
,
8441 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
8442 ptr
= copy_ssa_name (dataref_ptr
);
8444 ptr
= make_ssa_name (TREE_TYPE (dataref_ptr
));
8445 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr_info
);
8446 new_stmt
= gimple_build_assign
8447 (ptr
, BIT_AND_EXPR
, dataref_ptr
,
8449 (TREE_TYPE (dataref_ptr
),
8450 -(HOST_WIDE_INT
) align
));
8451 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
8453 = build2 (MEM_REF
, vectype
, ptr
,
8454 build_int_cst (ref_type
, 0));
8455 vect_copy_ref_info (data_ref
, DR_REF (first_dr_info
->dr
));
8456 vec_dest
= vect_create_destination_var (scalar_dest
,
8458 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
8459 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
8460 gimple_assign_set_lhs (new_stmt
, new_temp
);
8461 gimple_set_vdef (new_stmt
, gimple_vdef (stmt_info
->stmt
));
8462 gimple_set_vuse (new_stmt
, gimple_vuse (stmt_info
->stmt
));
8463 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
8466 bump
= size_binop (MULT_EXPR
, vs
,
8467 TYPE_SIZE_UNIT (elem_type
));
8468 bump
= size_binop (MINUS_EXPR
, bump
, size_one_node
);
8469 ptr
= bump_vector_ptr (dataref_ptr
, NULL
, gsi
,
8471 new_stmt
= gimple_build_assign
8472 (NULL_TREE
, BIT_AND_EXPR
, ptr
,
8474 (TREE_TYPE (ptr
), -(HOST_WIDE_INT
) align
));
8475 ptr
= copy_ssa_name (ptr
, new_stmt
);
8476 gimple_assign_set_lhs (new_stmt
, ptr
);
8477 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
8479 = build2 (MEM_REF
, vectype
, ptr
,
8480 build_int_cst (ref_type
, 0));
8483 case dr_explicit_realign_optimized
:
8485 if (TREE_CODE (dataref_ptr
) == SSA_NAME
)
8486 new_temp
= copy_ssa_name (dataref_ptr
);
8488 new_temp
= make_ssa_name (TREE_TYPE (dataref_ptr
));
8489 unsigned int align
= DR_TARGET_ALIGNMENT (first_dr_info
);
8490 new_stmt
= gimple_build_assign
8491 (new_temp
, BIT_AND_EXPR
, dataref_ptr
,
8492 build_int_cst (TREE_TYPE (dataref_ptr
),
8493 -(HOST_WIDE_INT
) align
));
8494 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
8496 = build2 (MEM_REF
, vectype
, new_temp
,
8497 build_int_cst (ref_type
, 0));
8503 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8504 /* DATA_REF is null if we've already built the statement. */
8507 vect_copy_ref_info (data_ref
, DR_REF (first_dr_info
->dr
));
8508 new_stmt
= gimple_build_assign (vec_dest
, data_ref
);
8510 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
8511 gimple_set_lhs (new_stmt
, new_temp
);
8513 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
8515 /* 3. Handle explicit realignment if necessary/supported.
8517 vec_dest = realign_load (msq, lsq, realignment_token) */
8518 if (alignment_support_scheme
== dr_explicit_realign_optimized
8519 || alignment_support_scheme
== dr_explicit_realign
)
8521 lsq
= gimple_assign_lhs (new_stmt
);
8522 if (!realignment_token
)
8523 realignment_token
= dataref_ptr
;
8524 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8525 new_stmt
= gimple_build_assign (vec_dest
, REALIGN_LOAD_EXPR
,
8526 msq
, lsq
, realignment_token
);
8527 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
8528 gimple_assign_set_lhs (new_stmt
, new_temp
);
8530 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
8532 if (alignment_support_scheme
== dr_explicit_realign_optimized
)
8535 if (i
== vec_num
- 1 && j
== ncopies
- 1)
8536 add_phi_arg (phi
, lsq
,
8537 loop_latch_edge (containing_loop
),
8543 if (memory_access_type
== VMAT_CONTIGUOUS_REVERSE
)
8545 tree perm_mask
= perm_mask_for_reverse (vectype
);
8546 new_temp
= permute_vec_elements (new_temp
, new_temp
,
8547 perm_mask
, stmt_info
, gsi
);
8548 new_stmt_info
= vinfo
->lookup_def (new_temp
);
8551 /* Collect vector loads and later create their permutation in
8552 vect_transform_grouped_load (). */
8553 if (grouped_load
|| slp_perm
)
8554 dr_chain
.quick_push (new_temp
);
8556 /* Store vector loads in the corresponding SLP_NODE. */
8557 if (slp
&& !slp_perm
)
8558 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
8560 /* With SLP permutation we load the gaps as well, without
8561 we need to skip the gaps after we manage to fully load
8562 all elements. group_gap_adj is DR_GROUP_SIZE here. */
8563 group_elt
+= nunits
;
8564 if (maybe_ne (group_gap_adj
, 0U)
8566 && known_eq (group_elt
, group_size
- group_gap_adj
))
8568 poly_wide_int bump_val
8569 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
8571 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
8572 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8577 /* Bump the vector pointer to account for a gap or for excess
8578 elements loaded for a permuted SLP load. */
8579 if (maybe_ne (group_gap_adj
, 0U) && slp_perm
)
8581 poly_wide_int bump_val
8582 = (wi::to_wide (TYPE_SIZE_UNIT (elem_type
))
8584 tree bump
= wide_int_to_tree (sizetype
, bump_val
);
8585 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
,
8590 if (slp
&& !slp_perm
)
8596 if (!vect_transform_slp_perm_load (slp_node
, dr_chain
, gsi
, vf
,
8597 slp_node_instance
, false,
8600 dr_chain
.release ();
8608 if (memory_access_type
!= VMAT_LOAD_STORE_LANES
)
8609 vect_transform_grouped_load (stmt_info
, dr_chain
,
8611 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
8616 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
8618 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
8619 prev_stmt_info
= new_stmt_info
;
8622 dr_chain
.release ();
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.
   *DTS - The def types for the arguments of the comparison

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */
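/* For instance (illustrative only, not from the original sources), both
   forms below are simple conditions:

     mask_1 = a_5 < b_6;             <- mask SSA_NAME of a vector boolean type
     x_7 = mask_1 ? c_8 : d_9;

     x_7 = a_5 < b_6 ? c_8 : d_9;    <- comparison embedded in the COND_EXPR

   whereas a condition operand that vect_is_simple_use cannot classify makes
   the COND_EXPR non-vectorizable.  */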
vect_is_simple_cond (tree cond, vec_info *vinfo,
		     tree *comp_vectype, enum vect_def_type *dts,
		     tree vectype)
{
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (TREE_CODE (cond) == SSA_NAME
      && VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (cond)))
    {
      if (!vect_is_simple_use (cond, vinfo, &dts[0], comp_vectype)
	  || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
	return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      if (!vect_is_simple_use (lhs, vinfo, &dts[0], &vectype1))
	return false;
    }
  else if (TREE_CODE (lhs) == INTEGER_CST || TREE_CODE (lhs) == REAL_CST
	   || TREE_CODE (lhs) == FIXED_CST)
    dts[0] = vect_constant_def;
  else
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      if (!vect_is_simple_use (rhs, vinfo, &dts[1], &vectype2))
	return false;
    }
  else if (TREE_CODE (rhs) == INTEGER_CST || TREE_CODE (rhs) == REAL_CST
	   || TREE_CODE (rhs) == FIXED_CST)
    dts[1] = vect_constant_def;
  else
    return false;

  if (vectype1 && vectype2
      && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
		   TYPE_VECTOR_SUBPARTS (vectype2)))
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  /* Invariant comparison.  */
  if (! *comp_vectype && vectype)
    {
      tree scalar_type = TREE_TYPE (lhs);
      /* If we can widen the comparison to match vectype do so.  */
      if (INTEGRAL_TYPE_P (scalar_type)
	  && tree_int_cst_lt (TYPE_SIZE (scalar_type),
			      TYPE_SIZE (TREE_TYPE (vectype))))
	scalar_type = build_nonstandard_integer_type
	  (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype))),
	   TYPE_UNSIGNED (scalar_type));
      *comp_vectype = get_vectype_for_scalar_type (scalar_type);
    }

  return true;
}
/* vectorizable_condition.

   Check if STMT_INFO is a conditional modify expression that can be
   vectorized.  If VEC_STMT is also passed, vectorize STMT_INFO: create a
   vectorized stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT,
   and insert it at GSI.

   When STMT_INFO is vectorized as a nested cycle, REDUC_DEF is the vector
   variable to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1,
   and in else clause if it is 2).

   Return true if STMT_INFO is vectorizable in this way.  */

vectorizable_condition (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			stmt_vec_info *vec_stmt, tree reduc_def,
			int reduc_index, slp_tree slp_node,
			stmt_vector_for_cost *cost_vec)
*vinfo
= stmt_info
->vinfo
;
8731 tree scalar_dest
= NULL_TREE
;
8732 tree vec_dest
= NULL_TREE
;
8733 tree cond_expr
, cond_expr0
= NULL_TREE
, cond_expr1
= NULL_TREE
;
8734 tree then_clause
, else_clause
;
8735 tree comp_vectype
= NULL_TREE
;
8736 tree vec_cond_lhs
= NULL_TREE
, vec_cond_rhs
= NULL_TREE
;
8737 tree vec_then_clause
= NULL_TREE
, vec_else_clause
= NULL_TREE
;
8740 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
8741 enum vect_def_type dts
[4]
8742 = {vect_unknown_def_type
, vect_unknown_def_type
,
8743 vect_unknown_def_type
, vect_unknown_def_type
};
8746 enum tree_code code
, cond_code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
8747 stmt_vec_info prev_stmt_info
= NULL
;
8749 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
8750 vec
<tree
> vec_oprnds0
= vNULL
;
8751 vec
<tree
> vec_oprnds1
= vNULL
;
8752 vec
<tree
> vec_oprnds2
= vNULL
;
8753 vec
<tree
> vec_oprnds3
= vNULL
;
8755 bool masked
= false;
8757 if (reduc_index
&& STMT_SLP_TYPE (stmt_info
))
8760 vect_reduction_type reduction_type
8761 = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info
);
8762 if (reduction_type
== TREE_CODE_REDUCTION
)
8764 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
8767 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
8768 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
8772 /* FORNOW: not yet supported. */
8773 if (STMT_VINFO_LIVE_P (stmt_info
))
8775 if (dump_enabled_p ())
8776 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
8777 "value used after loop.\n");
8782 /* Is vectorizable conditional operation? */
8783 gassign
*stmt
= dyn_cast
<gassign
*> (stmt_info
->stmt
);
8787 code
= gimple_assign_rhs_code (stmt
);
8789 if (code
!= COND_EXPR
)
8792 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
8793 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
8798 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
8800 gcc_assert (ncopies
>= 1);
8801 if (reduc_index
&& ncopies
> 1)
8802 return false; /* FORNOW */
8804 cond_expr
= gimple_assign_rhs1 (stmt
);
8805 then_clause
= gimple_assign_rhs2 (stmt
);
8806 else_clause
= gimple_assign_rhs3 (stmt
);
8808 if (!vect_is_simple_cond (cond_expr
, stmt_info
->vinfo
,
8809 &comp_vectype
, &dts
[0], slp_node
? NULL
: vectype
)
8813 if (!vect_is_simple_use (then_clause
, stmt_info
->vinfo
, &dts
[2], &vectype1
))
8815 if (!vect_is_simple_use (else_clause
, stmt_info
->vinfo
, &dts
[3], &vectype2
))
8818 if (vectype1
&& !useless_type_conversion_p (vectype
, vectype1
))
8821 if (vectype2
&& !useless_type_conversion_p (vectype
, vectype2
))
8824 masked
= !COMPARISON_CLASS_P (cond_expr
);
8825 vec_cmp_type
= build_same_sized_truth_vector_type (comp_vectype
);
8827 if (vec_cmp_type
== NULL_TREE
)
8830 cond_code
= TREE_CODE (cond_expr
);
8833 cond_expr0
= TREE_OPERAND (cond_expr
, 0);
8834 cond_expr1
= TREE_OPERAND (cond_expr
, 1);
8837 if (!masked
&& VECTOR_BOOLEAN_TYPE_P (comp_vectype
))
      /* Boolean values may have another representation in vectors
	 and therefore we prefer bit operations over comparison for
	 them (which also works for scalar masks).  We store opcodes
	 to use in bitop1 and bitop2.  Statement is vectorized as
	 BITOP2 (rhs1 BITOP1 rhs2) or rhs1 BITOP2 (BITOP1 rhs2)
	 depending on bitop1 and bitop2 arity.  */
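      /* For example (illustrative, not part of the original comment), with
	 boolean mask operands a and b:

	   x = a > b  ? y : z      becomes   tmp = ~b;  mask = a & tmp;
	   x = a <= b ? y : z      becomes   tmp = ~a;  mask = b | tmp;

	 i.e. bitop1 is applied to the (possibly swapped) second operand and
	 bitop2 combines the result with the first operand, after which the
	 VEC_COND_EXPR selects on MASK.  */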
8848 bitop1
= BIT_NOT_EXPR
;
8849 bitop2
= BIT_AND_EXPR
;
8852 bitop1
= BIT_NOT_EXPR
;
8853 bitop2
= BIT_IOR_EXPR
;
8856 bitop1
= BIT_NOT_EXPR
;
8857 bitop2
= BIT_AND_EXPR
;
8858 std::swap (cond_expr0
, cond_expr1
);
8861 bitop1
= BIT_NOT_EXPR
;
8862 bitop2
= BIT_IOR_EXPR
;
8863 std::swap (cond_expr0
, cond_expr1
);
8866 bitop1
= BIT_XOR_EXPR
;
8869 bitop1
= BIT_XOR_EXPR
;
8870 bitop2
= BIT_NOT_EXPR
;
8875 cond_code
= SSA_NAME
;
8880 if (bitop1
!= NOP_EXPR
)
8882 machine_mode mode
= TYPE_MODE (comp_vectype
);
8885 optab
= optab_for_tree_code (bitop1
, comp_vectype
, optab_default
);
8886 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8889 if (bitop2
!= NOP_EXPR
)
8891 optab
= optab_for_tree_code (bitop2
, comp_vectype
,
8893 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
8897 if (expand_vec_cond_expr_p (vectype
, comp_vectype
,
8900 STMT_VINFO_TYPE (stmt_info
) = condition_vec_info_type
;
8901 vect_model_simple_cost (stmt_info
, ncopies
, dts
, ndts
, slp_node
,
8912 vec_oprnds0
.create (1);
8913 vec_oprnds1
.create (1);
8914 vec_oprnds2
.create (1);
8915 vec_oprnds3
.create (1);
8919 scalar_dest
= gimple_assign_lhs (stmt
);
8920 if (reduction_type
!= EXTRACT_LAST_REDUCTION
)
8921 vec_dest
= vect_create_destination_var (scalar_dest
, vectype
);
8923 /* Handle cond expr. */
8924 for (j
= 0; j
< ncopies
; j
++)
8926 stmt_vec_info new_stmt_info
= NULL
;
8931 auto_vec
<tree
, 4> ops
;
8932 auto_vec
<vec
<tree
>, 4> vec_defs
;
8935 ops
.safe_push (cond_expr
);
8938 ops
.safe_push (cond_expr0
);
8939 ops
.safe_push (cond_expr1
);
8941 ops
.safe_push (then_clause
);
8942 ops
.safe_push (else_clause
);
8943 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
8944 vec_oprnds3
= vec_defs
.pop ();
8945 vec_oprnds2
= vec_defs
.pop ();
8947 vec_oprnds1
= vec_defs
.pop ();
8948 vec_oprnds0
= vec_defs
.pop ();
8955 = vect_get_vec_def_for_operand (cond_expr
, stmt_info
,
8957 vect_is_simple_use (cond_expr
, stmt_info
->vinfo
, &dts
[0]);
8962 = vect_get_vec_def_for_operand (cond_expr0
,
8963 stmt_info
, comp_vectype
);
8964 vect_is_simple_use (cond_expr0
, loop_vinfo
, &dts
[0]);
8967 = vect_get_vec_def_for_operand (cond_expr1
,
8968 stmt_info
, comp_vectype
);
8969 vect_is_simple_use (cond_expr1
, loop_vinfo
, &dts
[1]);
8971 if (reduc_index
== 1)
8972 vec_then_clause
= reduc_def
;
8975 vec_then_clause
= vect_get_vec_def_for_operand (then_clause
,
8977 vect_is_simple_use (then_clause
, loop_vinfo
, &dts
[2]);
8979 if (reduc_index
== 2)
8980 vec_else_clause
= reduc_def
;
8983 vec_else_clause
= vect_get_vec_def_for_operand (else_clause
,
8985 vect_is_simple_use (else_clause
, loop_vinfo
, &dts
[3]);
8992 = vect_get_vec_def_for_stmt_copy (vinfo
, vec_oprnds0
.pop ());
8995 = vect_get_vec_def_for_stmt_copy (vinfo
, vec_oprnds1
.pop ());
8997 vec_then_clause
= vect_get_vec_def_for_stmt_copy (vinfo
,
8998 vec_oprnds2
.pop ());
8999 vec_else_clause
= vect_get_vec_def_for_stmt_copy (vinfo
,
9000 vec_oprnds3
.pop ());
9005 vec_oprnds0
.quick_push (vec_cond_lhs
);
9007 vec_oprnds1
.quick_push (vec_cond_rhs
);
9008 vec_oprnds2
.quick_push (vec_then_clause
);
9009 vec_oprnds3
.quick_push (vec_else_clause
);
9012 /* Arguments are ready. Create the new vector stmt. */
9013 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_cond_lhs
)
9015 vec_then_clause
= vec_oprnds2
[i
];
9016 vec_else_clause
= vec_oprnds3
[i
];
9019 vec_compare
= vec_cond_lhs
;
9022 vec_cond_rhs
= vec_oprnds1
[i
];
9023 if (bitop1
== NOP_EXPR
)
9024 vec_compare
= build2 (cond_code
, vec_cmp_type
,
9025 vec_cond_lhs
, vec_cond_rhs
);
9028 new_temp
= make_ssa_name (vec_cmp_type
);
9030 if (bitop1
== BIT_NOT_EXPR
)
9031 new_stmt
= gimple_build_assign (new_temp
, bitop1
,
9035 = gimple_build_assign (new_temp
, bitop1
, vec_cond_lhs
,
9037 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9038 if (bitop2
== NOP_EXPR
)
9039 vec_compare
= new_temp
;
9040 else if (bitop2
== BIT_NOT_EXPR
)
9042 /* Instead of doing ~x ? y : z do x ? z : y. */
9043 vec_compare
= new_temp
;
9044 std::swap (vec_then_clause
, vec_else_clause
);
9048 vec_compare
= make_ssa_name (vec_cmp_type
);
9050 = gimple_build_assign (vec_compare
, bitop2
,
9051 vec_cond_lhs
, new_temp
);
9052 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9056 if (reduction_type
== EXTRACT_LAST_REDUCTION
)
9058 if (!is_gimple_val (vec_compare
))
9060 tree vec_compare_name
= make_ssa_name (vec_cmp_type
);
9061 gassign
*new_stmt
= gimple_build_assign (vec_compare_name
,
9063 vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9064 vec_compare
= vec_compare_name
;
9066 gcc_assert (reduc_index
== 2);
9067 gcall
*new_stmt
= gimple_build_call_internal
9068 (IFN_FOLD_EXTRACT_LAST
, 3, else_clause
, vec_compare
,
9070 gimple_call_set_lhs (new_stmt
, scalar_dest
);
9071 SSA_NAME_DEF_STMT (scalar_dest
) = new_stmt
;
9072 if (stmt_info
->stmt
== gsi_stmt (*gsi
))
9073 new_stmt_info
= vect_finish_replace_stmt (stmt_info
, new_stmt
);
9076 /* In this case we're moving the definition to later in the
9077 block. That doesn't matter because the only uses of the
9078 lhs are in phi statements. */
9079 gimple_stmt_iterator old_gsi
9080 = gsi_for_stmt (stmt_info
->stmt
);
9081 gsi_remove (&old_gsi
, true);
9083 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9088 new_temp
= make_ssa_name (vec_dest
);
9090 = gimple_build_assign (new_temp
, VEC_COND_EXPR
, vec_compare
,
9091 vec_then_clause
, vec_else_clause
);
9093 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9096 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
9103 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
9105 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
9107 prev_stmt_info
= new_stmt_info
;
9110 vec_oprnds0
.release ();
9111 vec_oprnds1
.release ();
9112 vec_oprnds2
.release ();
9113 vec_oprnds3
.release ();
/* vectorizable_comparison.

   Check if STMT_INFO is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return true if STMT_INFO is vectorizable in this way.  */

vectorizable_comparison (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			 stmt_vec_info *vec_stmt, tree reduc_def,
			 slp_tree slp_node, stmt_vector_for_cost *cost_vec)
9131 vec_info
*vinfo
= stmt_info
->vinfo
;
9132 tree lhs
, rhs1
, rhs2
;
9133 tree vectype1
= NULL_TREE
, vectype2
= NULL_TREE
;
9134 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
9135 tree vec_rhs1
= NULL_TREE
, vec_rhs2
= NULL_TREE
;
9137 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
9138 enum vect_def_type dts
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
9142 enum tree_code code
, bitop1
= NOP_EXPR
, bitop2
= NOP_EXPR
;
9143 stmt_vec_info prev_stmt_info
= NULL
;
9145 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
9146 vec
<tree
> vec_oprnds0
= vNULL
;
9147 vec
<tree
> vec_oprnds1
= vNULL
;
9151 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
9154 if (!vectype
|| !VECTOR_BOOLEAN_TYPE_P (vectype
))
9157 mask_type
= vectype
;
9158 nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
9163 ncopies
= vect_get_num_copies (loop_vinfo
, vectype
);
9165 gcc_assert (ncopies
>= 1);
9166 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
9167 && !(STMT_VINFO_DEF_TYPE (stmt_info
) == vect_nested_cycle
9171 if (STMT_VINFO_LIVE_P (stmt_info
))
9173 if (dump_enabled_p ())
9174 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9175 "value used after loop.\n");
9179 gassign
*stmt
= dyn_cast
<gassign
*> (stmt_info
->stmt
);
9183 code
= gimple_assign_rhs_code (stmt
);
9185 if (TREE_CODE_CLASS (code
) != tcc_comparison
)
9188 rhs1
= gimple_assign_rhs1 (stmt
);
9189 rhs2
= gimple_assign_rhs2 (stmt
);
9191 if (!vect_is_simple_use (rhs1
, stmt_info
->vinfo
, &dts
[0], &vectype1
))
9194 if (!vect_is_simple_use (rhs2
, stmt_info
->vinfo
, &dts
[1], &vectype2
))
9197 if (vectype1
&& vectype2
9198 && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1
),
9199 TYPE_VECTOR_SUBPARTS (vectype2
)))
9202 vectype
= vectype1
? vectype1
: vectype2
;
9204 /* Invariant comparison. */
9207 vectype
= get_vectype_for_scalar_type (TREE_TYPE (rhs1
));
9208 if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype
), nunits
))
9211 else if (maybe_ne (nunits
, TYPE_VECTOR_SUBPARTS (vectype
)))
  /* Can't compare mask and non-mask types.  */
  if (vectype1 && vectype2
      && (VECTOR_BOOLEAN_TYPE_P (vectype1) ^ VECTOR_BOOLEAN_TYPE_P (vectype2)))
    return false;

  /* Boolean values may have another representation in vectors
     and therefore we prefer bit operations over comparison for
     them (which also works for scalar masks).  We store opcodes
     to use in bitop1 and bitop2.  Statement is vectorized as
       BITOP2 (rhs1 BITOP1 rhs2) or
       rhs1 BITOP2 (BITOP1 rhs2)
     depending on bitop1 and bitop2 arity.  */
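  /* For example (illustrative, not from the original comment), with boolean
     operands a and b:

       a == b   becomes   tmp = a ^ b;  res = ~tmp;     (bitop1 XOR, bitop2 NOT)
       a != b   becomes   res = a ^ b;                  (bitop1 XOR only)
       a >  b   becomes   tmp = ~b;     res = a & tmp;  (bitop1 NOT, bitop2 AND)  */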
9226 if (VECTOR_BOOLEAN_TYPE_P (vectype
))
9228 if (code
== GT_EXPR
)
9230 bitop1
= BIT_NOT_EXPR
;
9231 bitop2
= BIT_AND_EXPR
;
9233 else if (code
== GE_EXPR
)
9235 bitop1
= BIT_NOT_EXPR
;
9236 bitop2
= BIT_IOR_EXPR
;
9238 else if (code
== LT_EXPR
)
9240 bitop1
= BIT_NOT_EXPR
;
9241 bitop2
= BIT_AND_EXPR
;
9242 std::swap (rhs1
, rhs2
);
9243 std::swap (dts
[0], dts
[1]);
9245 else if (code
== LE_EXPR
)
9247 bitop1
= BIT_NOT_EXPR
;
9248 bitop2
= BIT_IOR_EXPR
;
9249 std::swap (rhs1
, rhs2
);
9250 std::swap (dts
[0], dts
[1]);
9254 bitop1
= BIT_XOR_EXPR
;
9255 if (code
== EQ_EXPR
)
9256 bitop2
= BIT_NOT_EXPR
;
9262 if (bitop1
== NOP_EXPR
)
9264 if (!expand_vec_cmp_expr_p (vectype
, mask_type
, code
))
9269 machine_mode mode
= TYPE_MODE (vectype
);
9272 optab
= optab_for_tree_code (bitop1
, vectype
, optab_default
);
9273 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
9276 if (bitop2
!= NOP_EXPR
)
9278 optab
= optab_for_tree_code (bitop2
, vectype
, optab_default
);
9279 if (!optab
|| optab_handler (optab
, mode
) == CODE_FOR_nothing
)
9284 STMT_VINFO_TYPE (stmt_info
) = comparison_vec_info_type
;
9285 vect_model_simple_cost (stmt_info
, ncopies
* (1 + (bitop2
!= NOP_EXPR
)),
9286 dts
, ndts
, slp_node
, cost_vec
);
9293 vec_oprnds0
.create (1);
9294 vec_oprnds1
.create (1);
9298 lhs
= gimple_assign_lhs (stmt
);
9299 mask
= vect_create_destination_var (lhs
, mask_type
);
9301 /* Handle cmp expr. */
9302 for (j
= 0; j
< ncopies
; j
++)
9304 stmt_vec_info new_stmt_info
= NULL
;
9309 auto_vec
<tree
, 2> ops
;
9310 auto_vec
<vec
<tree
>, 2> vec_defs
;
9312 ops
.safe_push (rhs1
);
9313 ops
.safe_push (rhs2
);
9314 vect_get_slp_defs (ops
, slp_node
, &vec_defs
);
9315 vec_oprnds1
= vec_defs
.pop ();
9316 vec_oprnds0
= vec_defs
.pop ();
9320 vec_rhs1
= vect_get_vec_def_for_operand (rhs1
, stmt_info
,
9322 vec_rhs2
= vect_get_vec_def_for_operand (rhs2
, stmt_info
,
9328 vec_rhs1
= vect_get_vec_def_for_stmt_copy (vinfo
,
9329 vec_oprnds0
.pop ());
9330 vec_rhs2
= vect_get_vec_def_for_stmt_copy (vinfo
,
9331 vec_oprnds1
.pop ());
9336 vec_oprnds0
.quick_push (vec_rhs1
);
9337 vec_oprnds1
.quick_push (vec_rhs2
);
9340 /* Arguments are ready. Create the new vector stmt. */
9341 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_rhs1
)
9343 vec_rhs2
= vec_oprnds1
[i
];
9345 new_temp
= make_ssa_name (mask
);
9346 if (bitop1
== NOP_EXPR
)
9348 gassign
*new_stmt
= gimple_build_assign (new_temp
, code
,
9349 vec_rhs1
, vec_rhs2
);
9351 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9356 if (bitop1
== BIT_NOT_EXPR
)
9357 new_stmt
= gimple_build_assign (new_temp
, bitop1
, vec_rhs2
);
9359 new_stmt
= gimple_build_assign (new_temp
, bitop1
, vec_rhs1
,
9362 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9363 if (bitop2
!= NOP_EXPR
)
9365 tree res
= make_ssa_name (mask
);
9366 if (bitop2
== BIT_NOT_EXPR
)
9367 new_stmt
= gimple_build_assign (res
, bitop2
, new_temp
);
9369 new_stmt
= gimple_build_assign (res
, bitop2
, vec_rhs1
,
9372 = vect_finish_stmt_generation (stmt_info
, new_stmt
, gsi
);
9376 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt_info
);
9383 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt_info
;
9385 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt_info
;
9387 prev_stmt_info
= new_stmt_info
;
9390 vec_oprnds0
.release ();
9391 vec_oprnds1
.release ();
/* If SLP_NODE is nonnull, return true if vectorizable_live_operation
   can handle all live statements in the node.  Otherwise return true
   if STMT_INFO is not live or if vectorizable_live_operation can handle it.
   GSI and VEC_STMT are as for vectorizable_live_operation.  */
can_vectorize_live_stmts (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
			  slp_tree slp_node, stmt_vec_info *vec_stmt,
			  stmt_vector_for_cost *cost_vec)
{
  if (slp_node)
    {
      stmt_vec_info slp_stmt_info;
      unsigned int i;
      FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, slp_stmt_info)
	{
	  if (STMT_VINFO_LIVE_P (slp_stmt_info)
	      && !vectorizable_live_operation (slp_stmt_info, gsi, slp_node, i,
					       vec_stmt, cost_vec))
	    return false;
	}
    }
  else if (STMT_VINFO_LIVE_P (stmt_info)
	   && !vectorizable_live_operation (stmt_info, gsi, slp_node, -1,
					    vec_stmt, cost_vec))
    return false;

  return true;
}
/* Make sure the statement is vectorizable.  */

vect_analyze_stmt (stmt_vec_info stmt_info, bool *need_to_vectorize,
		   slp_tree node, slp_instance node_instance,
		   stmt_vector_for_cost *cost_vec)
9433 vec_info
*vinfo
= stmt_info
->vinfo
;
9434 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
9435 enum vect_relevant relevance
= STMT_VINFO_RELEVANT (stmt_info
);
9437 gimple_seq pattern_def_seq
;
9439 if (dump_enabled_p ())
9441 dump_printf_loc (MSG_NOTE
, vect_location
, "==> examining statement: ");
9442 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt_info
->stmt
, 0);
9445 if (gimple_has_volatile_ops (stmt_info
->stmt
))
9447 if (dump_enabled_p ())
9448 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9449 "not vectorized: stmt has volatile operands\n");
9454 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
9456 && (pattern_def_seq
= STMT_VINFO_PATTERN_DEF_SEQ (stmt_info
)))
9458 gimple_stmt_iterator si
;
9460 for (si
= gsi_start (pattern_def_seq
); !gsi_end_p (si
); gsi_next (&si
))
9462 stmt_vec_info pattern_def_stmt_info
9463 = vinfo
->lookup_stmt (gsi_stmt (si
));
9464 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info
)
9465 || STMT_VINFO_LIVE_P (pattern_def_stmt_info
))
9467 /* Analyze def stmt of STMT if it's a pattern stmt. */
9468 if (dump_enabled_p ())
9470 dump_printf_loc (MSG_NOTE
, vect_location
,
9471 "==> examining pattern def statement: ");
9472 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
,
9473 pattern_def_stmt_info
->stmt
, 0);
9476 if (!vect_analyze_stmt (pattern_def_stmt_info
,
9477 need_to_vectorize
, node
, node_instance
,
  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */
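  /* As an illustration (not from the original sources): given

       S1: a_t = (TYPE1) x_t;
       S2: b_t = (TYPE1) y_t;
       S3: prod = a_t * b_t;

     the pattern recognizer may attach a pattern statement

       S3': prod = x_t w* y_t;      (WIDEN_MULT_EXPR)

     to S3.  If S3 itself is not relevant, only S3' is analyzed below;
     if both are relevant or live, both are analyzed.  */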
9498 stmt_vec_info pattern_stmt_info
= STMT_VINFO_RELATED_STMT (stmt_info
);
9499 if (!STMT_VINFO_RELEVANT_P (stmt_info
)
9500 && !STMT_VINFO_LIVE_P (stmt_info
))
9502 if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
9503 && pattern_stmt_info
9504 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info
)
9505 || STMT_VINFO_LIVE_P (pattern_stmt_info
)))
9507 /* Analyze PATTERN_STMT instead of the original stmt. */
9508 stmt_info
= pattern_stmt_info
;
9509 if (dump_enabled_p ())
9511 dump_printf_loc (MSG_NOTE
, vect_location
,
9512 "==> examining pattern statement: ");
9513 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, stmt_info
->stmt
, 0);
9518 if (dump_enabled_p ())
9519 dump_printf_loc (MSG_NOTE
, vect_location
, "irrelevant.\n");
9524 else if (STMT_VINFO_IN_PATTERN_P (stmt_info
)
9526 && pattern_stmt_info
9527 && (STMT_VINFO_RELEVANT_P (pattern_stmt_info
)
9528 || STMT_VINFO_LIVE_P (pattern_stmt_info
)))
9530 /* Analyze PATTERN_STMT too. */
9531 if (dump_enabled_p ())
9533 dump_printf_loc (MSG_NOTE
, vect_location
,
9534 "==> examining pattern statement: ");
9535 dump_gimple_stmt (MSG_NOTE
, TDF_SLIM
, pattern_stmt_info
->stmt
, 0);
9538 if (!vect_analyze_stmt (pattern_stmt_info
, need_to_vectorize
, node
,
9539 node_instance
, cost_vec
))
9543 switch (STMT_VINFO_DEF_TYPE (stmt_info
))
9545 case vect_internal_def
:
9548 case vect_reduction_def
:
9549 case vect_nested_cycle
:
9550 gcc_assert (!bb_vinfo
9551 && (relevance
== vect_used_in_outer
9552 || relevance
== vect_used_in_outer_by_reduction
9553 || relevance
== vect_used_by_reduction
9554 || relevance
== vect_unused_in_scope
9555 || relevance
== vect_used_only_live
));
9558 case vect_induction_def
:
9559 gcc_assert (!bb_vinfo
);
9562 case vect_constant_def
:
9563 case vect_external_def
:
9564 case vect_unknown_def_type
:
9569 if (STMT_VINFO_RELEVANT_P (stmt_info
))
9571 tree type
= gimple_expr_type (stmt_info
->stmt
);
9572 gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type
)));
9573 gcall
*call
= dyn_cast
<gcall
*> (stmt_info
->stmt
);
9574 gcc_assert (STMT_VINFO_VECTYPE (stmt_info
)
9575 || (call
&& gimple_call_lhs (call
) == NULL_TREE
));
9576 *need_to_vectorize
= true;
9579 if (PURE_SLP_STMT (stmt_info
) && !node
)
9581 dump_printf_loc (MSG_NOTE
, vect_location
,
9582 "handled only by SLP analysis\n");
9588 && (STMT_VINFO_RELEVANT_P (stmt_info
)
9589 || STMT_VINFO_DEF_TYPE (stmt_info
) == vect_reduction_def
))
9590 ok
= (vectorizable_simd_clone_call (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9591 || vectorizable_conversion (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9592 || vectorizable_shift (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9593 || vectorizable_operation (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9594 || vectorizable_assignment (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9595 || vectorizable_load (stmt_info
, NULL
, NULL
, node
, node_instance
,
9597 || vectorizable_call (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9598 || vectorizable_store (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9599 || vectorizable_reduction (stmt_info
, NULL
, NULL
, node
,
9600 node_instance
, cost_vec
)
9601 || vectorizable_induction (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9602 || vectorizable_condition (stmt_info
, NULL
, NULL
, NULL
, 0, node
,
9604 || vectorizable_comparison (stmt_info
, NULL
, NULL
, NULL
, node
,
9609 ok
= (vectorizable_simd_clone_call (stmt_info
, NULL
, NULL
, node
,
9611 || vectorizable_conversion (stmt_info
, NULL
, NULL
, node
,
9613 || vectorizable_shift (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9614 || vectorizable_operation (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9615 || vectorizable_assignment (stmt_info
, NULL
, NULL
, node
,
9617 || vectorizable_load (stmt_info
, NULL
, NULL
, node
, node_instance
,
9619 || vectorizable_call (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9620 || vectorizable_store (stmt_info
, NULL
, NULL
, node
, cost_vec
)
9621 || vectorizable_condition (stmt_info
, NULL
, NULL
, NULL
, 0, node
,
9623 || vectorizable_comparison (stmt_info
, NULL
, NULL
, NULL
, node
,
9629 if (dump_enabled_p ())
9631 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9632 "not vectorized: relevant stmt not ");
9633 dump_printf (MSG_MISSED_OPTIMIZATION
, "supported: ");
9634 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
9635 stmt_info
->stmt
, 0);
  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
9644 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
9645 && !can_vectorize_live_stmts (stmt_info
, NULL
, node
, NULL
, cost_vec
))
9647 if (dump_enabled_p ())
9649 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9650 "not vectorized: live stmt not supported: ");
9651 dump_gimple_stmt (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
,
9652 stmt_info
->stmt
, 0);
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT_INFO, and insert it at GSI.  */

vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
		     slp_tree slp_node, slp_instance slp_node_instance)
9670 vec_info
*vinfo
= stmt_info
->vinfo
;
9671 bool is_store
= false;
9672 stmt_vec_info vec_stmt
= NULL
;
9675 gcc_assert (slp_node
|| !PURE_SLP_STMT (stmt_info
));
9676 stmt_vec_info old_vec_stmt_info
= STMT_VINFO_VEC_STMT (stmt_info
);
9678 bool nested_p
= (STMT_VINFO_LOOP_VINFO (stmt_info
)
9679 && nested_in_vect_loop_p
9680 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info
)),
9683 gimple
*stmt
= stmt_info
->stmt
;
9684 switch (STMT_VINFO_TYPE (stmt_info
))
9686 case type_demotion_vec_info_type
:
9687 case type_promotion_vec_info_type
:
9688 case type_conversion_vec_info_type
:
9689 done
= vectorizable_conversion (stmt_info
, gsi
, &vec_stmt
, slp_node
,
9694 case induc_vec_info_type
:
9695 done
= vectorizable_induction (stmt_info
, gsi
, &vec_stmt
, slp_node
,
9700 case shift_vec_info_type
:
9701 done
= vectorizable_shift (stmt_info
, gsi
, &vec_stmt
, slp_node
, NULL
);
9705 case op_vec_info_type
:
9706 done
= vectorizable_operation (stmt_info
, gsi
, &vec_stmt
, slp_node
,
9711 case assignment_vec_info_type
:
9712 done
= vectorizable_assignment (stmt_info
, gsi
, &vec_stmt
, slp_node
,
9717 case load_vec_info_type
:
9718 done
= vectorizable_load (stmt_info
, gsi
, &vec_stmt
, slp_node
,
9719 slp_node_instance
, NULL
);
9723 case store_vec_info_type
:
9724 done
= vectorizable_store (stmt_info
, gsi
, &vec_stmt
, slp_node
, NULL
);
9726 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
) && !slp_node
)
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
9732 stmt_vec_info group_info
= DR_GROUP_FIRST_ELEMENT (stmt_info
);
9733 if (DR_GROUP_STORE_COUNT (group_info
) == DR_GROUP_SIZE (group_info
))
9740 case condition_vec_info_type
:
9741 done
= vectorizable_condition (stmt_info
, gsi
, &vec_stmt
, NULL
, 0,
9746 case comparison_vec_info_type
:
9747 done
= vectorizable_comparison (stmt_info
, gsi
, &vec_stmt
, NULL
,
9752 case call_vec_info_type
:
9753 done
= vectorizable_call (stmt_info
, gsi
, &vec_stmt
, slp_node
, NULL
);
9754 stmt
= gsi_stmt (*gsi
);
9757 case call_simd_clone_vec_info_type
:
9758 done
= vectorizable_simd_clone_call (stmt_info
, gsi
, &vec_stmt
,
9760 stmt
= gsi_stmt (*gsi
);
9763 case reduc_vec_info_type
:
9764 done
= vectorizable_reduction (stmt_info
, gsi
, &vec_stmt
, slp_node
,
9765 slp_node_instance
, NULL
);
9770 if (!STMT_VINFO_LIVE_P (stmt_info
))
9772 if (dump_enabled_p ())
9773 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
9774 "stmt not supported.\n");
9779 /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
9780 This would break hybrid SLP vectorization. */
9782 gcc_assert (!vec_stmt
9783 && STMT_VINFO_VEC_STMT (stmt_info
) == old_vec_stmt_info
);
9785 /* Handle inner-loop stmts whose DEF is used in the loop-nest that
9786 is being vectorized, but outside the immediately enclosing loop. */
9789 && STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
9790 && (STMT_VINFO_RELEVANT (stmt_info
) == vect_used_in_outer
9791 || STMT_VINFO_RELEVANT (stmt_info
) ==
9792 vect_used_in_outer_by_reduction
))
9794 struct loop
*innerloop
= LOOP_VINFO_LOOP (
9795 STMT_VINFO_LOOP_VINFO (stmt_info
))->inner
;
9796 imm_use_iterator imm_iter
;
9797 use_operand_p use_p
;
9800 if (dump_enabled_p ())
9801 dump_printf_loc (MSG_NOTE
, vect_location
,
9802 "Record the vdef for outer-loop vectorization.\n");
      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
9807 if (gimple_code (stmt
) == GIMPLE_PHI
)
9808 scalar_dest
= PHI_RESULT (stmt
);
9810 scalar_dest
= gimple_get_lhs (stmt
);
9812 FOR_EACH_IMM_USE_FAST (use_p
, imm_iter
, scalar_dest
)
9813 if (!flow_bb_inside_loop_p (innerloop
, gimple_bb (USE_STMT (use_p
))))
9815 stmt_vec_info exit_phi_info
9816 = vinfo
->lookup_stmt (USE_STMT (use_p
));
9817 STMT_VINFO_VEC_STMT (exit_phi_info
) = vec_stmt
;
9821 /* Handle stmts whose DEF is used outside the loop-nest that is
9822 being vectorized. */
9823 if (STMT_VINFO_TYPE (stmt_info
) != reduc_vec_info_type
)
9825 done
= can_vectorize_live_stmts (stmt_info
, gsi
, slp_node
, &vec_stmt
,
9831 STMT_VINFO_VEC_STMT (stmt_info
) = vec_stmt
;
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */
vect_remove_stores (stmt_vec_info first_stmt_info)
{
  vec_info *vinfo = first_stmt_info->vinfo;
  stmt_vec_info next_stmt_info = first_stmt_info;

  while (next_stmt_info)
    {
      stmt_vec_info tmp = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
      next_stmt_info = vect_orig_stmt (next_stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      vinfo->remove_stmt (next_stmt_info);
      next_stmt_info = tmp;
    }
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

get_vectype_for_scalar_type_and_size (tree scalar_type, poly_uint64 size)
9864 tree orig_scalar_type
= scalar_type
;
9865 scalar_mode inner_mode
;
9866 machine_mode simd_mode
;
9870 if (!is_int_mode (TYPE_MODE (scalar_type
), &inner_mode
)
9871 && !is_float_mode (TYPE_MODE (scalar_type
), &inner_mode
))
9874 unsigned int nbytes
= GET_MODE_SIZE (inner_mode
);
  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
9882 if (INTEGRAL_TYPE_P (scalar_type
)
9883 && (GET_MODE_BITSIZE (inner_mode
) != TYPE_PRECISION (scalar_type
)
9884 || TREE_CODE (scalar_type
) != INTEGER_TYPE
))
9885 scalar_type
= build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode
),
9886 TYPE_UNSIGNED (scalar_type
));
9888 /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
9889 When the component mode passes the above test simply use a type
9890 corresponding to that mode. The theory is that any use that
9891 would cause problems with this will disable vectorization anyway. */
9892 else if (!SCALAR_FLOAT_TYPE_P (scalar_type
)
9893 && !INTEGRAL_TYPE_P (scalar_type
))
9894 scalar_type
= lang_hooks
.types
.type_for_mode (inner_mode
, 1);
  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));
  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;
9907 /* If no size was supplied use the mode the target prefers. Otherwise
9908 lookup a vector mode of the specified size. */
9909 if (known_eq (size
, 0U))
9910 simd_mode
= targetm
.vectorize
.preferred_simd_mode (inner_mode
);
9911 else if (!multiple_p (size
, nbytes
, &nunits
)
9912 || !mode_for_vector (inner_mode
, nunits
).exists (&simd_mode
))
9914 /* NOTE: nunits == 1 is allowed to support single element vector types. */
9915 if (!multiple_p (GET_MODE_SIZE (simd_mode
), nbytes
, &nunits
))
9918 vectype
= build_vector_type (scalar_type
, nunits
);
9920 if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
9921 && !INTEGRAL_MODE_P (TYPE_MODE (vectype
)))
9924 /* Re-attach the address-space qualifier if we canonicalized the scalar
9926 if (TYPE_ADDR_SPACE (orig_scalar_type
) != TYPE_ADDR_SPACE (vectype
))
9927 return build_qualified_type
9928 (vectype
, KEEP_QUAL_ADDR_SPACE (TYPE_QUALS (orig_scalar_type
)));
9933 poly_uint64 current_vector_size
;
/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

get_vectype_for_scalar_type (tree scalar_type)
9944 vectype
= get_vectype_for_scalar_type_and_size (scalar_type
,
9945 current_vector_size
);
9947 && known_eq (current_vector_size
, 0U))
9948 current_vector_size
= GET_MODE_SIZE (TYPE_MODE (vectype
));
9952 /* Function get_mask_type_for_scalar_type.
9954 Returns the mask type corresponding to a result of comparison
9955 of vectors of specified SCALAR_TYPE as supported by target. */
9958 get_mask_type_for_scalar_type (tree scalar_type
)
9960 tree vectype
= get_vectype_for_scalar_type (scalar_type
);
9965 return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype
),
9966 current_vector_size
);
9969 /* Function get_same_sized_vectype
9971 Returns a vector type corresponding to SCALAR_TYPE of size
9972 VECTOR_TYPE if supported by the target. */
9975 get_same_sized_vectype (tree scalar_type
, tree vector_type
)
9977 if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type
))
9978 return build_same_sized_truth_vector_type (vector_type
);
9980 return get_vectype_for_scalar_type_and_size
9981 (scalar_type
, GET_MODE_SIZE (TYPE_MODE (vector_type
)));
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.

   Output:
   DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
     case OPERAND is an SSA_NAME that is defined in the vectorizable region
   DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
     the definition could be anywhere in the function
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */
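/* Illustrative example (not from the original sources): in the loop

     for (i = 0; i < n; i++)
       {
	 x = a[i];          <- S1
	 b[i] = x + c;      <- S2
       }

   the use of X in S2 is a vect_internal_def (defined by S1 in the same
   iteration), the loop-invariant C is a vect_external_def, and a constant
   operand would be a vect_constant_def; a value carried in from the previous
   iteration through a PHI that is not a recognized reduction or induction
   would make the stmt non-vectorizable.  */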
10005 vect_is_simple_use (tree operand
, vec_info
*vinfo
, enum vect_def_type
*dt
,
10006 stmt_vec_info
*def_stmt_info_out
, gimple
**def_stmt_out
)
10008 if (def_stmt_info_out
)
10009 *def_stmt_info_out
= NULL
;
10011 *def_stmt_out
= NULL
;
10012 *dt
= vect_unknown_def_type
;
10014 if (dump_enabled_p ())
10016 dump_printf_loc (MSG_NOTE
, vect_location
,
10017 "vect_is_simple_use: operand ");
10018 if (TREE_CODE (operand
) == SSA_NAME
10019 && !SSA_NAME_IS_DEFAULT_DEF (operand
))
10020 dump_gimple_expr (MSG_NOTE
, TDF_SLIM
, SSA_NAME_DEF_STMT (operand
), 0);
10022 dump_generic_expr (MSG_NOTE
, TDF_SLIM
, operand
);
10025 if (CONSTANT_CLASS_P (operand
))
10026 *dt
= vect_constant_def
;
10027 else if (is_gimple_min_invariant (operand
))
10028 *dt
= vect_external_def
;
10029 else if (TREE_CODE (operand
) != SSA_NAME
)
10030 *dt
= vect_unknown_def_type
;
10031 else if (SSA_NAME_IS_DEFAULT_DEF (operand
))
10032 *dt
= vect_external_def
;
10035 gimple
*def_stmt
= SSA_NAME_DEF_STMT (operand
);
10036 stmt_vec_info stmt_vinfo
= vinfo
->lookup_def (operand
);
10038 *dt
= vect_external_def
;
10041 stmt_vinfo
= vect_stmt_to_vectorize (stmt_vinfo
);
10042 def_stmt
= stmt_vinfo
->stmt
;
10043 switch (gimple_code (def_stmt
))
10046 case GIMPLE_ASSIGN
:
10048 *dt
= STMT_VINFO_DEF_TYPE (stmt_vinfo
);
10051 *dt
= vect_unknown_def_type
;
10054 if (def_stmt_info_out
)
10055 *def_stmt_info_out
= stmt_vinfo
;
10058 *def_stmt_out
= def_stmt
;
10061 if (dump_enabled_p ())
10063 dump_printf (MSG_NOTE
, ", type of def: ");
10066 case vect_uninitialized_def
:
10067 dump_printf (MSG_NOTE
, "uninitialized\n");
10069 case vect_constant_def
:
10070 dump_printf (MSG_NOTE
, "constant\n");
10072 case vect_external_def
:
10073 dump_printf (MSG_NOTE
, "external\n");
10075 case vect_internal_def
:
10076 dump_printf (MSG_NOTE
, "internal\n");
10078 case vect_induction_def
:
10079 dump_printf (MSG_NOTE
, "induction\n");
10081 case vect_reduction_def
:
10082 dump_printf (MSG_NOTE
, "reduction\n");
10084 case vect_double_reduction_def
:
10085 dump_printf (MSG_NOTE
, "double reduction\n");
10087 case vect_nested_cycle
:
10088 dump_printf (MSG_NOTE
, "nested cycle\n");
10090 case vect_unknown_def_type
:
10091 dump_printf (MSG_NOTE
, "unknown\n");
10096 if (*dt
== vect_unknown_def_type
)
10098 if (dump_enabled_p ())
10099 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
10100 "Unsupported pattern.\n");
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo, enum vect_def_type *dt,
		    tree *vectype, stmt_vec_info *def_stmt_info_out,
		    gimple **def_stmt_out)
{
  stmt_vec_info def_stmt_info;
  gimple *def_stmt;
  if (!vect_is_simple_use (operand, vinfo, dt, &def_stmt_info, &def_stmt))
    return false;

  if (def_stmt_out)
    *def_stmt_out = def_stmt;
  if (def_stmt_info_out)
    *def_stmt_info_out = def_stmt_info;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      *vectype = STMT_VINFO_VECTYPE (def_stmt_info);
      gcc_assert (*vectype != NULL_TREE);
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_is_simple_use: vectype ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, *vectype);
	  dump_printf (MSG_NOTE, "\n");
	}
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}

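/* Hedged usage sketch for this vectype-returning overload: callers that need
   the operand's vector type pass an extra tree pointer; *VECTYPE is only
   non-NULL for definitions inside the vectorizable region, so callers fall
   back to deriving a type from the scalar operand otherwise (OP, VINFO and
   DT are assumed to exist in the caller):

       tree op_vectype;
       if (!vect_is_simple_use (op, vinfo, &dt, &op_vectype))
	 return false;
       if (!op_vectype)
	 op_vectype = get_vectype_for_scalar_type (TREE_TYPE (op));  */
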
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT,
   FIX_TRUNC and WIDEN_MULT.  This function checks if these operations
   are supported by the target platform either directly (via vector
   tree-codes), or via target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow to change the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt_info)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt_info, vectype_out,
					     vectype_in, code1, code2,
					     multi_step_cvt, interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have the
	     same operation.  One such an example is s += a * b, where elements
	     in a and b cannot be reordered.  Here we check if the vector defined
	     by STMT is only directly used in the reduction statement.  */
	  tree lhs = gimple_assign_lhs (stmt_info->stmt);
	  stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
	  if (use_stmt_info
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case DOT_PROD_EXPR:
      c1 = DOT_PROD_EXPR;
      c2 = DOT_PROD_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_UNPACK_FIX_TRUNC_LO_EXPR;
      c2 = VEC_UNPACK_FIX_TRUNC_HI_EXPR;
      break;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
			 TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type = vect_halve_mask_nunits (prev_type);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
			     TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}

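/* Worked example of the outputs, under the assumption of a target that only
   provides single-step unpacking instructions: for a char -> int conversion
   (CODE = NOP_EXPR) with VECTYPE_IN a 16-unit char vector and VECTYPE_OUT a
   4-unit int vector, a call such as

       supportable_widening_operation (NOP_EXPR, stmt_info, vectype_out,
				       vectype_in, &code1, &code2,
				       &multi_step_cvt, &interm_types);

   would be expected to return true with CODE1/CODE2 set to the
   VEC_UNPACK_LO/HI_EXPR codes, MULTI_STEP_CVT = 1 and INTERM_TYPES holding
   the intermediate short vector type, i.e. char->short->int is performed in
   two unpacking rounds.  The exact outcome depends on the target's optabs.  */
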
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
   and FLOAT.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_PACK_FLOAT_EXPR;
      break;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add additional check for elements number.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
	    || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
			 TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  if (code == FLOAT_EXPR)
    return false;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type = vect_double_mask_nunits (prev_type);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return (!VECTOR_BOOLEAN_TYPE_P (vectype)
		|| known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
			     TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}

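/* Worked example mirroring the comment above (a sketch, assuming a target
   with only single-step pack instructions): for an int -> char conversion
   (CODE = NOP_EXPR) with a 4-unit int VECTYPE_IN and a 16-unit char
   VECTYPE_OUT, a call such as

       supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code1, &multi_step_cvt,
					&interm_types);

   would be expected to return true with CODE1 = VEC_PACK_TRUNC_EXPR,
   MULTI_STEP_CVT = 1 and INTERM_TYPES holding the intermediate short vector
   type: values are packed int->short and then short->char.  */
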
/* Generate and return a statement that sets vector mask MASK such that
   MASK[I] is true iff J + START_INDEX < END_INDEX for all J <= I.  */

gcall *
vect_gen_while (tree mask, tree start_index, tree end_index)
{
  tree cmp_type = TREE_TYPE (start_index);
  tree mask_type = TREE_TYPE (mask);
  gcc_checking_assert (direct_internal_fn_supported_p (IFN_WHILE_ULT,
						       cmp_type, mask_type,
						       OPTIMIZE_FOR_SPEED));
  gcall *call = gimple_build_call_internal (IFN_WHILE_ULT, 3,
					    start_index, end_index,
					    build_zero_cst (mask_type));
  gimple_call_set_lhs (call, mask);
  return call;
}

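/* Example of the gimple produced (a sketch; the variable names are
   placeholders): for a fully-masked loop tail with induction variable IV
   and iteration count NITERS, the call

       gcall *c = vect_gen_while (loop_mask, iv, niters);

   builds an internal-function call that dumps roughly as

       loop_mask = .WHILE_ULT (iv, niters, { 0, ... });

   whose result has loop_mask[I] true exactly while IV + I < NITERS.  */
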
/* Generate a vector mask of type MASK_TYPE for which index I is false iff
   J + START_INDEX < END_INDEX for all J <= I.  Add the statements to SEQ.  */

tree
vect_gen_while_not (gimple_seq *seq, tree mask_type, tree start_index,
		    tree end_index)
{
  tree tmp = make_ssa_name (mask_type);
  gcall *call = vect_gen_while (tmp, start_index, end_index);
  gimple_seq_add_stmt (seq, call);
  return gimple_build (seq, BIT_NOT_EXPR, mask_type, tmp);
}

/* Try to compute the vector types required to vectorize STMT_INFO,
   returning true on success and false if vectorization isn't possible.

   On success:

   - Set *STMT_VECTYPE_OUT to:
     - NULL_TREE if the statement doesn't need to be vectorized;
     - boolean_type_node if the statement is a boolean operation whose
       vector type can only be determined once all the other vector types
       are known; and
     - the equivalent of STMT_VINFO_VECTYPE otherwise.

   - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
     number of units needed to vectorize STMT_INFO, or NULL_TREE if the
     statement does not help to determine the overall number of units.  */

bool
vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
				tree *stmt_vectype_out,
				tree *nunits_vectype_out)
{
  gimple *stmt = stmt_info->stmt;

  *stmt_vectype_out = NULL_TREE;
  *nunits_vectype_out = NULL_TREE;

  if (gimple_get_lhs (stmt) == NULL_TREE
      /* MASK_STORE has no lhs, but is ok.  */
      && !gimple_call_internal_p (stmt, IFN_MASK_STORE))
    {
      if (is_a <gcall *> (stmt))
	{
	  /* Ignore calls with no lhs.  These must be calls to
	     #pragma omp simd functions, and what vectorization factor
	     it really needs can't be determined until
	     vectorizable_simd_clone_call.  */
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "defer to SIMD clone analysis.\n");
	  return true;
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: irregular stmt.");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}
      return false;
    }

  if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: vector stmt in loop:");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}
      return false;
    }

  tree vectype;
  tree scalar_type = NULL_TREE;
  if (STMT_VINFO_VECTYPE (stmt_info))
    *stmt_vectype_out = vectype = STMT_VINFO_VECTYPE (stmt_info);
  else
    {
      gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
      if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
	scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
      else
	scalar_type = TREE_TYPE (gimple_get_lhs (stmt));

      /* Pure bool ops don't participate in number-of-units computation.
	 For comparisons use the types being compared.  */
      if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
	  && is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) != COND_EXPR)
	{
	  *stmt_vectype_out = boolean_type_node;

	  tree rhs1 = gimple_assign_rhs1 (stmt);
	  if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
	      && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (rhs1)))
	    scalar_type = TREE_TYPE (rhs1);
	  else
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "pure bool operation.\n");
	      return true;
	    }
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "get vectype for scalar type: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
	  dump_printf (MSG_NOTE, "\n");
	}
      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "not vectorized: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  return false;
	}

      if (!*stmt_vectype_out)
	*stmt_vectype_out = vectype;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
	  dump_printf (MSG_NOTE, "\n");
	}
    }

  /* Don't try to compute scalar types if the stmt produces a boolean
     vector; use the existing vector type instead.  */
  tree nunits_vectype;
  if (VECTOR_BOOLEAN_TYPE_P (vectype))
    nunits_vectype = vectype;
  else
    {
      /* The number of units is set according to the smallest scalar
	 type (or the largest vector size, but we only support one
	 vector size per vectorization).  */
      if (*stmt_vectype_out != boolean_type_node)
	{
	  HOST_WIDE_INT dummy;
	  scalar_type = vect_get_smallest_scalar_type (stmt_info,
						       &dummy, &dummy);
	}
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "get vectype for scalar type: ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
	  dump_printf (MSG_NOTE, "\n");
	}
      nunits_vectype = get_vectype_for_scalar_type (scalar_type);
    }
  if (!nunits_vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: unsupported data-type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
		GET_MODE_SIZE (TYPE_MODE (nunits_vectype))))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: different sized vector "
			   "types in statement, ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     nunits_vectype);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, nunits_vectype);
      dump_printf (MSG_NOTE, "\n");

      dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
      dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (nunits_vectype));
      dump_printf (MSG_NOTE, "\n");
    }

  *nunits_vectype_out = nunits_vectype;
  return true;
}

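/* Hedged example of the two outputs: for a widening statement such as

       int_res = (int) short_src;

   *STMT_VECTYPE_OUT would be the int vector type derived from the lhs,
   while *NUNITS_VECTYPE_OUT comes from the smallest scalar type in the
   statement (short here), so it is the vector type with the larger number
   of units; the vectorization factor is then driven by the latter.  Both
   vectors have the same byte size, so the size check above still passes.  */
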
/* Try to determine the correct vector type for STMT_INFO, which is a
   statement that produces a scalar boolean result.  Return the vector
   type on success, otherwise return NULL_TREE.  */

tree
vect_get_mask_type_for_stmt (stmt_vec_info stmt_info)
{
  gimple *stmt = stmt_info->stmt;
  tree mask_type = NULL;
  tree vectype, scalar_type;

  if (is_gimple_assign (stmt)
      && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
      && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt))))
    {
      scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
      mask_type = get_mask_type_for_scalar_type (scalar_type);

      if (!mask_type)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "not vectorized: unsupported mask\n");
	  return NULL_TREE;
	}
    }
  else
    {
      tree rhs;
      ssa_op_iter iter;
      enum vect_def_type dt;

      FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
	{
	  if (!vect_is_simple_use (rhs, stmt_info->vinfo, &dt, &vectype))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: can't compute mask type "
				   "for statement, ");
		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
				    0);
		}
	      return NULL_TREE;
	    }

	  /* No vectype probably means external definition.
	     Allow it in case there is another operand which
	     allows to determine mask type.  */
	  if (!vectype)
	    continue;

	  if (!mask_type)
	    mask_type = vectype;
	  else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
			     TYPE_VECTOR_SUBPARTS (vectype)))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: different sized masks "
				   "types in statement, ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				     mask_type);
		  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				     vectype);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      return NULL_TREE;
	    }
	  else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
		   != VECTOR_BOOLEAN_TYPE_P (vectype))
	    {
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				   "not vectorized: mixed mask and "
				   "nonmask vector types in statement, ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				     mask_type);
		  dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
		  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				     vectype);
		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
		}
	      return NULL_TREE;
	    }
	}

      /* We may compare boolean value loaded as vector of integers.
	 Fix mask_type in such case.  */
      if (mask_type
	  && !VECTOR_BOOLEAN_TYPE_P (mask_type)
	  && gimple_code (stmt) == GIMPLE_ASSIGN
	  && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
	mask_type = build_same_sized_truth_vector_type (mask_type);
    }

  /* No mask_type should mean loop invariant predicate.
     This is probably a subject for optimization in if-conversion.  */
  if (!mask_type && dump_enabled_p ())
    {
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
		       "not vectorized: can't compute mask type "
		       "for statement, ");
      dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);