/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
                  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
                  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
                                stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
                                misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
        (builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
                          count, kind, stmt_info, misalign, where);
}
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
                         "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
                   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
                    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
                      build_int_cst (size_type_node, n),
                      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
                    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
         pattern that can potentially be vectorized.  Don't mark the stmt
         as relevant/live because it's not going to be vectorized.
         Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "last stmt in pattern. don't mark"
                         " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
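/* An illustrative sketch (not part of the original comment): in

     for (i = 0; i < n; i++)
       {
         a[i] = b[i] + 1;    <-- relevant: has a vdef (alters memory)
         sum = sum + b[i];   <-- live: sum is used after the loop
       }

   this function marks the store as vect_used_in_scope and the reduction
   update as live; the stmts feeding them are only marked later, when the
   worklist is processed in vect_mark_stmts_to_be_vectorized.  */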
static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
                      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
         != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
        && !gimple_clobber_p (stmt))
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vec_stmt_relevant_p: stmt has vdefs.\n");
        *relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          basic_block bb = gimple_bb (USE_STMT (use_p));
          if (!flow_bb_inside_loop_p (loop, bb))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vec_stmt_relevant_p: used out of loop.\n");

              if (is_gimple_debug (USE_STMT (use_p)))
                continue;

              /* We expect all such uses to be in the loop exit phis
                 (because of loop closed form)  */
              gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
              gcc_assert (bb == single_exit (loop)->dest);

              *live_p = true;
            }
        }
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that its of one of
     the following forms:
     -1- array_ref = var
     -2- var = array_ref
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt))
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_MASK_STORE:
            operand = gimple_call_arg (stmt, 3);
            if (operand == use)
              return true;
            /* FALLTHRU */
          case IFN_MASK_LOAD:
            operand = gimple_call_arg (stmt, 2);
            if (operand == use)
              return true;
            break;
          default:
            break;
          }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
             enum vect_relevant relevant, vec<gimple *> *worklist,
             bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
        dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
                  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
        outer-loop-header-bb:
                d = def_stmt
        inner-loop:
                stmt # use (d)
        outer-loop-tail-bb:
                ...               */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
                      vect_used_in_scope : vect_unused_in_scope;
          break;

        case vect_used_in_outer_by_reduction:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_by_reduction;
          break;

        case vect_used_in_outer:
          gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
          relevant = vect_used_in_scope;
          break;

        case vect_used_in_scope:
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
        outer-loop-header-bb:
                ...
        inner-loop:
                d = def_stmt
        outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
                stmt # use (d)          */
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
        {
        case vect_unused_in_scope:
          relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
            || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
                      vect_used_in_outer_by_reduction : vect_unused_in_scope;
          break;

        case vect_used_by_reduction:
          relevant = vect_used_in_outer_by_reduction;
          break;

        case vect_used_in_scope:
          relevant = vect_used_in_outer;
          break;

        default:
          gcc_unreachable ();
        }
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.    T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */
bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
        {
          phi = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
            }

          if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, phi, relevant, live_p);
        }
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
        {
          stmt = gsi_stmt (si);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }

          if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
            vect_mark_relevant (&worklist, stmt, relevant, live_p);
        }
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
         (DEF_STMT) as relevant/irrelevant and live/dead according to the
         liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
         propagated as is to the DEF_STMTs of its USEs:
           live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
           relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

         One exception is when STMT has been identified as defining a reduction
         variable; in this case we set the liveness/relevance as follows:
           live_p = false
           relevant = vect_used_by_reduction
         This is because we distinguish between two kinds of relevant stmts -
         those that are used by a reduction computation, and those that are
         (also) used by a regular computation.  This allows us later on to
         identify stmts that are used solely by a reduction, and therefore the
         order of the results that they produce does not have to be kept.  */
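      /* A concrete sketch: for  sum += a[i]  the load feeding only the
         reduction is marked vect_used_by_reduction rather than
         vect_used_in_scope, and live_p is dropped to false below, so the
         lanes of the partial sums may later be reordered freely.  */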
      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
        {
          case vect_reduction_def:
            switch (tmp_relevant)
              {
                case vect_unused_in_scope:
                  relevant = vect_used_by_reduction;
                  break;

                case vect_used_by_reduction:
                  if (gimple_code (stmt) == GIMPLE_PHI)
                    break;
                  /* fall through */

                default:
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "unsupported use of reduction.\n");
                  return false;
              }

            live_p = false;
            break;

          case vect_nested_cycle:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_in_outer_by_reduction
                && tmp_relevant != vect_used_in_outer)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of nested cycle.\n");

                return false;
              }

            live_p = false;
            break;

          case vect_double_reduction_def:
            if (tmp_relevant != vect_unused_in_scope
                && tmp_relevant != vect_used_by_reduction)
              {
                if (dump_enabled_p ())
                  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                   "unsupported use of double reduction.\n");

                return false;
              }

            live_p = false;
            break;

          default:
            break;
        }

      if (is_pattern_stmt_p (stmt_vinfo))
        {
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
          if (is_gimple_assign (stmt))
            {
              enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
              tree op = gimple_assign_rhs1 (stmt);

              i = 1;
              if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
                {
                  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
                                    live_p, relevant, &worklist, false)
                      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
                                       live_p, relevant, &worklist, false))
                    return false;
                  i = 2;
                }
              for (; i < gimple_num_ops (stmt); i++)
                {
                  op = gimple_op (stmt, i);
                  if (TREE_CODE (op) == SSA_NAME
                      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
                                       &worklist, false))
                    return false;
                }
            }
          else if (is_gimple_call (stmt))
            {
              for (i = 0; i < gimple_call_num_args (stmt); i++)
                {
                  tree arg = gimple_call_arg (stmt, i);
                  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
                                    &worklist, false))
                    return false;
                }
            }
        }
      else
        FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
            if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
                              &worklist, false))
              return false;
          }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          tree off;
          tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
          gcc_assert (decl);
          if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
                            &worklist, true))
            return false;
        }
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */
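/* A worked sketch: vectorizing  c = a + b  with VF == 8 on 4-element
   vectors needs NCOPIES == 2, so two vector_stmt costs are recorded in
   the loop body; a constant or loop-invariant operand would additionally
   record one vector_stmt in the prologue for building its vector def.  */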
static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
                        enum vect_def_type *dt,
                        stmt_vector_for_cost *prologue_cost_vec,
                        stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
                                         stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
                                  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_simple_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */
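/* A worked sketch of the formula below: with PWR == 1 a demotion costs
   vect_pow2 (0) + vect_pow2 (1) = 1 + 2 = 3 vec_promote_demote stmts,
   while a promotion costs vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6,
   since each widening step doubles the number of result vectors.  */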
static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
                                    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
        (i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
                                    vec_promote_demote, stmt_info, 0,
                                    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
                                      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_promotion_demotion_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
                       bool store_lanes_p, enum vect_def_type dt,
                       slp_tree slp_node,
                       stmt_vector_for_cost *prologue_cost_vec,
                       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
                                       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
        {
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
          group_size = 1;
        }
      else
        {
          first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
          group_size = vect_cost_group_size (stmt_info);
        }

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
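  /* A worked sketch: storing a GROUP_SIZE == 4 group with NCOPIES == 1
     costs ceil_log2 (4) * 4 == 8 vec_perm stmts for the interleaving,
     on top of the four vector stores themselves.  */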
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses a high and low interleave or shuffle operations for each
         needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_store_cost: strided group_size = %d .\n",
                         group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
                                     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_store_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
                     unsigned int *inside_cost,
                     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vector_store, stmt_info, 0,
                                          vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: aligned.\n");
        break;
      }

    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned store.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_store, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_store_cost: unaligned supported by "
                           "hardware.\n");
        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_store_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
                      bool load_lanes_p, slp_tree slp_node,
                      stmt_vector_for_cost *prologue_cost_vec,
                      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
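  /* A worked sketch: a load-and-permute of a GROUP_SIZE == 2 group with
     NCOPIES == 1 records ceil_log2 (2) * 2 == 2 vec_perm stmts on top of
     the two vector loads themselves.  */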
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses an even and odd extract operations or shuffle operations
         for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
                                      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vect_model_load_cost: strided group_size = %d .\n",
                         group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
                                       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
                                       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
                        ((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
                         || group_size > 1 || slp_node),
                        &inside_cost, &prologue_cost,
                        prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
                                     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vect_model_load_cost: inside_cost = %d, "
                     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
                    bool add_realign_cost, unsigned int *inside_cost,
                    unsigned int *prologue_cost,
                    stmt_vector_for_cost *prologue_cost_vec,
                    stmt_vector_for_cost *body_cost_vec,
                    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: aligned.\n");

        break;
      }
    case dr_unaligned_supported:
      {
        /* Here, we assign an additional cost for the unaligned load.  */
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          unaligned_load, stmt_info,
                                          DR_MISALIGNMENT (dr), vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned supported by "
                           "hardware.\n");

        break;
      }
    case dr_explicit_realign:
      {
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
                                          vector_load, stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies,
                                          vec_perm, stmt_info, 0, vect_body);

        /* FIXME: If the misalignment remains fixed across the iterations of
           the containing loop, the following cost should be added to the
           prologue costs.  */
        if (targetm.vectorize.builtin_mask_for_load)
          *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
                                            stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign\n");

        break;
      }
    case dr_explicit_realign_optimized:
      {
        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: unaligned software "
                           "pipelined.\n");

        /* Unaligned software pipeline has a load of an address, an initial
           load, and possibly a mask operation to "prime" the loop.  However,
           if this is an access in a group of loads, which provide grouped
           access, then the above cost should only be considered for one
           access in the group.  Inside the loop, there is a load op
           and a realignment op.  */

        if (add_realign_cost && record_prologue_costs)
          {
            *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
                                                vector_stmt, stmt_info,
                                                0, vect_prologue);
            if (targetm.vectorize.builtin_mask_for_load)
              *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
                                                  vector_stmt, stmt_info,
                                                  0, vect_prologue);
          }

        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
                                          stmt_info, 0, vect_body);
        *inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
                                          stmt_info, 0, vect_body);

        if (dump_enabled_p ())
          dump_printf_loc (MSG_NOTE, vect_location,
                           "vect_model_load_cost: explicit realign optimized"
                           "\n");

        break;
      }

    case dr_unaligned_unsupported:
      {
        *inside_cost = VECT_MAX_COST;

        if (dump_enabled_p ())
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "vect_model_load_cost: unsupported access.\n");
        break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
        {
          struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
          basic_block new_bb;
          edge pe;

          if (nested_in_vect_loop_p (loop, stmt))
            loop = loop->inner;

          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
          gcc_assert (!new_bb);
        }
      else
        {
          bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
          basic_block bb;
          gimple_stmt_iterator gsi_bb_start;

          gcc_assert (bb_vinfo);
          bb = BB_VINFO_BB (bb_vinfo);
          gsi_bb_start = gsi_after_labels (bb);
          gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
        }
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
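/* An illustrative sketch: called with VAL == 5, a 4-element integer
   vector TYPE, and a NULL GSI, the function emits something like

     cst_1 = { 5, 5, 5, 5 };

   in the loop preheader and returns cst_1 (the SSA name shown is made up
   for the example).  */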
tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
        {
          /* Scalar boolean value should be transformed into
             all zeros or all ones value before building a vector.  */
          if (VECTOR_BOOLEAN_TYPE_P (type))
            {
              tree true_val = build_all_ones_cst (TREE_TYPE (type));
              tree false_val = build_zero_cst (TREE_TYPE (type));

              if (CONSTANT_CLASS_P (val))
                val = integer_zerop (val) ? false_val : true_val;
              else
                {
                  new_temp = make_ssa_name (TREE_TYPE (type));
                  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
                                                   val, true_val, false_val);
                  vect_init_vector_1 (stmt, init_stmt, gsi);
                  val = new_temp;
                }
            }
          else if (CONSTANT_CLASS_P (val))
            val = fold_convert (TREE_TYPE (type), val);
          else
            {
              new_temp = make_ssa_name (TREE_TYPE (type));
              init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
              vect_init_vector_1 (stmt, init_stmt, gsi);
              val = new_temp;
            }
        }
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */
tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
        {
          if (loc_printed)
            dump_printf (MSG_NOTE, "  def_stmt =  ");
          else
            dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
        }
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
        if (vectype)
          vector_type = vectype;
        else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
                 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
          vector_type = build_same_sized_truth_vector_type (stmt_vectype);
        else
          vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

        gcc_assert (vector_type);
        return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);

        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        /* Get vectorized pattern statement.  */
        if (!vec_stmt
            && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
            && !STMT_VINFO_RELEVANT (def_stmt_info))
          vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
                       STMT_VINFO_RELATED_STMT (def_stmt_info)));
        gcc_assert (vec_stmt);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else if (is_gimple_call (vec_stmt))
          vec_oprnd = gimple_call_lhs (vec_stmt);
        else
          vec_oprnd = gimple_assign_lhs (vec_stmt);
        return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
        gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

        /* Get the def from the vectorized stmt.  */
        def_stmt_info = vinfo_for_stmt (def_stmt);
        vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
        if (gimple_code (vec_stmt) == GIMPLE_PHI)
          vec_oprnd = PHI_RESULT (vec_stmt);
        else
          vec_oprnd = gimple_get_lhs (vec_stmt);
        return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
        In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
        To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

        To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

        For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
        vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
        vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
        vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */
tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
                                 vec<tree> *vec_oprnds0,
                                 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
                   vec<tree> *vec_oprnds0,
                   vec<tree> *vec_oprnds1,
                   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
        ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
        *vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
        {
          vec_oprnds1->create (1);
          vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
          vec_oprnds1->quick_push (vec_oprnd);
        }
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
                             gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
        {
          tree vdef = gimple_vdef (at_stmt);
          gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
          /* If we have an SSA vuse and insert a store, update virtual
             SSA form to avoid triggering the renamer.  Do so only
             if we can easily see all uses - which is what almost always
             happens with the way vectorized stmts are inserted.  */
          if ((vdef && TREE_CODE (vdef) == SSA_NAME)
              && ((is_gimple_assign (vec_stmt)
                   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
                  || (is_gimple_call (vec_stmt)
                      && !(gimple_call_flags (vec_stmt)
                           & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
            {
              tree new_vdef = copy_ssa_name (vuse, vec_stmt);
              gimple_set_vdef (vec_stmt, new_vdef);
              SET_USE (gimple_vuse_op (at_stmt), new_vdef);
            }
        }
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */
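/* An illustrative sketch: a call to sqrt seen as CFN_SQRT with a
   2-element double vector as both input and output is vectorizable if
   the target provides the corresponding direct optab for that mode, in
   which case IFN_SQRT is returned; otherwise the result is IFN_LAST.  */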
static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
                                tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
        {
          tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
          tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
          if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
                                              OPTIMIZE_FOR_SPEED))
            return ifn;
        }
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
                                  gimple_stmt_iterator *);
/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
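/* The calls handled here have the shapes (visible in the rebuilt calls
   further down):

     lhs = IFN_MASK_LOAD (addr, align, mask);
     IFN_MASK_STORE (addr, align, mask, rhs);

   so the mask is argument 2 and the stored value argument 3 below.  */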
static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
                              gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;
  bool is_store;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gcc_assert (ncopies >= 1);

  is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
  mask = gimple_call_arg (stmt, 2);

  if (TREE_CODE (TREE_TYPE (mask)) != BOOLEAN_TYPE)
    return false;

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  if (STMT_VINFO_STRIDED_P (stmt_info))
    return false;

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
      || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (is_store)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
        return false;
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
                                               &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
                               &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.");
          return false;
        }

      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree masktype
        = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "masked gather with integer mask not supported.");
          return false;
        }
    }
  else if (tree_int_cst_compare (nested_in_vect_loop
                                 ? STMT_VINFO_DR_STEP (stmt_info)
                                 : DR_STEP (dr), size_zero_node) <= 0)
    return false;
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
           || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
                                          TYPE_MODE (mask_vectype),
                                          !is_store)
           || (rhs_vectype
               && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (is_store)
        vect_model_store_cost (stmt_info, ncopies, false, dt,
                               NULL, NULL, NULL);
      else
        vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
      tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
      tree mask_perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype)
                           && types_compatible_p (srctype, masktype));
      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
          for (i = 0; i < nunits; ++i)
            sel[i] = i | gather_off_nunits;
          mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
        }
      else
        gcc_unreachable ();
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      scale = build_int_cst (scaletype, gather_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          if (mask_perm_mask && (j & 1))
            mask_op = permute_vec_elements (mask_op, mask_op,
                                            mask_perm_mask, stmt, gsi);
          else
            {
              if (j == 0)
                vec_mask = vect_get_vec_def_for_operand (mask, stmt);
              else
                {
                  vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
                  vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
                }

              mask_op = vec_mask;
              if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
                {
                  gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
                              == TYPE_VECTOR_SUBPARTS (masktype));
                  var = vect_get_new_ssa_name (masktype, vect_simple_var);
                  mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
                  new_stmt
                    = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  mask_op = var;
                }
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, mask_op, ptr, op, mask_op,
                                 scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }

      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
         from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
        {
          stmt = STMT_VINFO_RELATED_STMT (stmt_info);
          stmt_info = vinfo_for_stmt (stmt);
        }
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
      return true;
    }
  else if (is_store)
    {
= NULL_TREE
, vec_mask
= NULL_TREE
;
1984 prev_stmt_info
= NULL
;
1985 LOOP_VINFO_HAS_MASK_STORE (loop_vinfo
) = true;
1986 for (i
= 0; i
< ncopies
; i
++)
1988 unsigned align
, misalign
;
1992 tree rhs
= gimple_call_arg (stmt
, 3);
1993 vec_rhs
= vect_get_vec_def_for_operand (rhs
, stmt
);
1994 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1995 /* We should have catched mismatched types earlier. */
1996 gcc_assert (useless_type_conversion_p (vectype
,
1997 TREE_TYPE (vec_rhs
)));
1998 dataref_ptr
= vect_create_data_ref_ptr (stmt
, vectype
, NULL
,
1999 NULL_TREE
, &dummy
, gsi
,
2000 &ptr_incr
, false, &inv_p
);
2001 gcc_assert (!inv_p
);
2005 vect_is_simple_use (vec_rhs
, loop_vinfo
, &def_stmt
, &dt
);
2006 vec_rhs
= vect_get_vec_def_for_stmt_copy (dt
, vec_rhs
);
2007 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
2008 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
2009 dataref_ptr
= bump_vector_ptr (dataref_ptr
, ptr_incr
, gsi
, stmt
,
2010 TYPE_SIZE_UNIT (vectype
));
2013 align
= TYPE_ALIGN_UNIT (vectype
);
2014 if (aligned_access_p (dr
))
2016 else if (DR_MISALIGNMENT (dr
) == -1)
2018 align
= TYPE_ALIGN_UNIT (elem_type
);
2022 misalign
= DR_MISALIGNMENT (dr
);
2023 set_ptr_info_alignment (get_ptr_info (dataref_ptr
), align
,
2025 tree ptr
= build_int_cst (TREE_TYPE (gimple_call_arg (stmt
, 1)),
2026 misalign
? misalign
& -misalign
: align
);
2028 = gimple_build_call_internal (IFN_MASK_STORE
, 4, dataref_ptr
,
2029 ptr
, vec_mask
, vec_rhs
);
2030 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2032 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2034 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2035 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
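  /* Worked example (added for illustration): the second argument of the
     IFN_MASK_STORE/IFN_MASK_LOAD calls built here encodes the known
     alignment of the access.  MISALIGN & -MISALIGN extracts the largest
     power of two dividing the misalignment, e.g. for misalign == 12 it
     yields 4, so the access is known to be at least 4-byte aligned; with
     misalign == 0 the full ALIGN is used instead.  */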
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false,
						      &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? misalign & -misalign : align);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  ptr, vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
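/* Illustrative example (not from the original comments): the masked load
   path above vectorizes conditional loads such as

     for (i = 0; i < n; i++)
       if (c[i])
	 x[i] = y[i];

   where if-conversion has turned the load of y[i] into a MASK_LOAD call;
   the scalar call is finally replaced by a harmless zeroing assignment so
   that it cannot survive even with -fno-tree-dce.  */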
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
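/* Example (illustrative): with 256-bit vectors, a V4DI (four 64-bit lanes)
   can typically be narrowed to the low halves of a V8SI in one
   VEC_PACK_TRUNC-style step, so simple_integer_narrowing succeeds; a
   DImode-to-QImode narrowing would need several steps and fails.  */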
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
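/* For example (illustrative, assuming the target provides a suitable
   internal function or vector math builtin):

     for (i = 0; i < n; i++)
       a[i] = sqrtf (b[i]);

   is transformed so that each copy of the scalar statement becomes a
   single call on a whole vector, roughly va' = SQRT (vb').  */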
static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  tree_code convert_code = ERROR_MARK;
  if (cfn != CFN_LAST
      && (modifier == NONE
	  || (modifier == NARROW
	      && simple_integer_narrowing (vectype_out, vectype_in,
					   &convert_code))))
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW && ifn == IFN_LAST)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
) /* transformation not required. */
2358 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
2359 if (dump_enabled_p ())
2360 dump_printf_loc (MSG_NOTE
, vect_location
, "=== vectorizable_call ==="
2362 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
2363 if (ifn
!= IFN_LAST
&& modifier
== NARROW
&& !slp_node
)
2364 add_stmt_cost (stmt_info
->vinfo
->target_cost_data
, ncopies
/ 2,
2365 vec_promote_demote
, stmt_info
, 0, vect_body
);
2372 if (dump_enabled_p ())
2373 dump_printf_loc (MSG_NOTE
, vect_location
, "transform call.\n");
2376 scalar_dest
= gimple_call_lhs (stmt
);
2377 vec_dest
= vect_create_destination_var (scalar_dest
, vectype_out
);
2379 prev_stmt_info
= NULL
;
2380 if (modifier
== NONE
|| ifn
!= IFN_LAST
)
2382 tree prev_res
= NULL_TREE
;
2383 for (j
= 0; j
< ncopies
; ++j
)
2385 /* Build argument list for the vectorized call. */
2387 vargs
.create (nargs
);
2393 auto_vec
<vec
<tree
> > vec_defs (nargs
);
2394 vec
<tree
> vec_oprnds0
;
2396 for (i
= 0; i
< nargs
; i
++)
2397 vargs
.quick_push (gimple_call_arg (stmt
, i
));
2398 vect_get_slp_defs (vargs
, slp_node
, &vec_defs
, -1);
2399 vec_oprnds0
= vec_defs
[0];
2401 /* Arguments are ready. Create the new vector stmt. */
2402 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vec_oprnd0
)
2405 for (k
= 0; k
< nargs
; k
++)
2407 vec
<tree
> vec_oprndsk
= vec_defs
[k
];
2408 vargs
[k
] = vec_oprndsk
[i
];
2410 if (modifier
== NARROW
)
2412 tree half_res
= make_ssa_name (vectype_in
);
2413 new_stmt
= gimple_build_call_internal_vec (ifn
, vargs
);
2414 gimple_call_set_lhs (new_stmt
, half_res
);
2415 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2418 prev_res
= half_res
;
2421 new_temp
= make_ssa_name (vec_dest
);
2422 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
2423 prev_res
, half_res
);
2427 if (ifn
!= IFN_LAST
)
2428 new_stmt
= gimple_build_call_internal_vec (ifn
, vargs
);
2430 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
2431 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2432 gimple_call_set_lhs (new_stmt
, new_temp
);
2434 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2435 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
2438 for (i
= 0; i
< nargs
; i
++)
2440 vec
<tree
> vec_oprndsi
= vec_defs
[i
];
2441 vec_oprndsi
.release ();
2446 for (i
= 0; i
< nargs
; i
++)
2448 op
= gimple_call_arg (stmt
, i
);
2451 = vect_get_vec_def_for_operand (op
, stmt
);
2454 vec_oprnd0
= gimple_call_arg (new_stmt
, i
);
2456 = vect_get_vec_def_for_stmt_copy (dt
[i
], vec_oprnd0
);
2459 vargs
.quick_push (vec_oprnd0
);
2462 if (gimple_call_internal_p (stmt
)
2463 && gimple_call_internal_fn (stmt
) == IFN_GOMP_SIMD_LANE
)
2465 tree
*v
= XALLOCAVEC (tree
, nunits_out
);
2467 for (k
= 0; k
< nunits_out
; ++k
)
2468 v
[k
] = build_int_cst (unsigned_type_node
, j
* nunits_out
+ k
);
2469 tree cst
= build_vector (vectype_out
, v
);
2471 = vect_get_new_ssa_name (vectype_out
, vect_simple_var
, "cst_");
2472 gimple
*init_stmt
= gimple_build_assign (new_var
, cst
);
2473 vect_init_vector_1 (stmt
, init_stmt
, NULL
);
2474 new_temp
= make_ssa_name (vec_dest
);
2475 new_stmt
= gimple_build_assign (new_temp
, new_var
);
2477 else if (modifier
== NARROW
)
2479 tree half_res
= make_ssa_name (vectype_in
);
2480 new_stmt
= gimple_build_call_internal_vec (ifn
, vargs
);
2481 gimple_call_set_lhs (new_stmt
, half_res
);
2482 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2485 prev_res
= half_res
;
2488 new_temp
= make_ssa_name (vec_dest
);
2489 new_stmt
= gimple_build_assign (new_temp
, convert_code
,
2490 prev_res
, half_res
);
2494 if (ifn
!= IFN_LAST
)
2495 new_stmt
= gimple_build_call_internal_vec (ifn
, vargs
);
2497 new_stmt
= gimple_build_call_vec (fndecl
, vargs
);
2498 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
2499 gimple_call_set_lhs (new_stmt
, new_temp
);
2501 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
2503 if (j
== (modifier
== NARROW
? 1 : 0))
2504 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
2506 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
2508 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
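  /* Illustrative note: the NARROW path below (used when only a target
     builtin, not an internal function, is available) feeds each vector
     call two adjacent input vectors per operand, e.g. with ncopies == 2
     and one argument the calls are roughly

       w0' = foo (v0', v1');  w1' = foo (v2', v3');

     where v0'..v3' are consecutive vector defs of the scalar argument.  */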
  else if (modifier == NARROW)
    {
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  if (ifn != IFN_LAST)
		    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
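/* Example (illustrative): for a use such as &base[offset + lane * 4],
   where lane is the result of an IFN_GOMP_SIMD_LANE call for this loop's
   simduid, the walk above accumulates BASE = &base[offset] and
   LINEAR_STEP = 4 and marks the argument simd-lane-linear.  */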
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
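/* For example (illustrative): given

     #pragma omp declare simd notinbranch
     float foo (float x);

   a loop

     for (i = 0; i < n; i++)
       a[i] = foo (b[i]);

   can be vectorized by calling one of foo's simd clones on whole vectors
   of b, provided the target considers a clone usable.  */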
static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }

  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
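  /* Worked example (illustrative): with a vectorization factor of 16 and
     a chosen clone of simdlen 8, ncopies = 16 / 8 = 2 calls are emitted
     per vectorized iteration; a simdlen-4 clone would have been penalized
     by (log2 (16) - log2 (4)) * 1024 in the badness computation above.  */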
  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber),
					   gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
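/* Example (illustrative): widening a V8HI multiply into V4SI products is
   done in two halves, roughly

     vw0' = VEC_WIDEN_MULT_LO_EXPR <va', vb'>;
     vw1' = VEC_WIDEN_MULT_HI_EXPR <va', vb'>;

   each call to this function generates one such half.  */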
static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}


/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds,
			      multi_step_cvt - 1);
}


/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the
   function recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence. Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}


/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
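/* Example (illustrative): a conversion such as

     short s = ...;
     double d = (double) s;

   is usually not supported as a single vector operation; the analysis
   below then tries an intermediate integer mode (e.g. SImode), so the
   shorts are first widened to ints (a NOP conversion) and the ints
   converted to doubles, with the intermediate types recorded in
   INTERM_TYPES.  */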
3597 vectorizable_conversion (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
3598 gimple
**vec_stmt
, slp_tree slp_node
)
3602 tree op0
, op1
= NULL_TREE
;
3603 tree vec_oprnd0
= NULL_TREE
, vec_oprnd1
= NULL_TREE
;
3604 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
3605 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
3606 enum tree_code code
, code1
= ERROR_MARK
, code2
= ERROR_MARK
;
3607 enum tree_code codecvt1
= ERROR_MARK
, codecvt2
= ERROR_MARK
;
3608 tree decl1
= NULL_TREE
, decl2
= NULL_TREE
;
3611 enum vect_def_type dt
[2] = {vect_unknown_def_type
, vect_unknown_def_type
};
3612 gimple
*new_stmt
= NULL
;
3613 stmt_vec_info prev_stmt_info
;
3616 tree vectype_out
, vectype_in
;
3618 tree lhs_type
, rhs_type
;
3619 enum { NARROW
, NONE
, WIDEN
} modifier
;
3620 vec
<tree
> vec_oprnds0
= vNULL
;
3621 vec
<tree
> vec_oprnds1
= vNULL
;
3623 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
3624 vec_info
*vinfo
= stmt_info
->vinfo
;
3625 int multi_step_cvt
= 0;
3626 vec
<tree
> vec_dsts
= vNULL
;
3627 vec
<tree
> interm_types
= vNULL
;
3628 tree last_oprnd
, intermediate_type
, cvt_type
= NULL_TREE
;
3630 machine_mode rhs_mode
;
3631 unsigned short fltsz
;
3633 /* Is STMT a vectorizable conversion? */
3635 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
3638 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
3642 if (!is_gimple_assign (stmt
))
3645 if (TREE_CODE (gimple_assign_lhs (stmt
)) != SSA_NAME
)
3648 code
= gimple_assign_rhs_code (stmt
);
3649 if (!CONVERT_EXPR_CODE_P (code
)
3650 && code
!= FIX_TRUNC_EXPR
3651 && code
!= FLOAT_EXPR
3652 && code
!= WIDEN_MULT_EXPR
3653 && code
!= WIDEN_LSHIFT_EXPR
)
3656 op_type
= TREE_CODE_LENGTH (code
);
3658 /* Check types of lhs and rhs. */
3659 scalar_dest
= gimple_assign_lhs (stmt
);
3660 lhs_type
= TREE_TYPE (scalar_dest
);
3661 vectype_out
= STMT_VINFO_VECTYPE (stmt_info
);
3663 op0
= gimple_assign_rhs1 (stmt
);
3664 rhs_type
= TREE_TYPE (op0
);
3666 if ((code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
3667 && !((INTEGRAL_TYPE_P (lhs_type
)
3668 && INTEGRAL_TYPE_P (rhs_type
))
3669 || (SCALAR_FLOAT_TYPE_P (lhs_type
)
3670 && SCALAR_FLOAT_TYPE_P (rhs_type
))))
3673 if (!VECTOR_BOOLEAN_TYPE_P (vectype_out
)
3674 && ((INTEGRAL_TYPE_P (lhs_type
)
3675 && (TYPE_PRECISION (lhs_type
)
3676 != GET_MODE_PRECISION (TYPE_MODE (lhs_type
))))
3677 || (INTEGRAL_TYPE_P (rhs_type
)
3678 && (TYPE_PRECISION (rhs_type
)
3679 != GET_MODE_PRECISION (TYPE_MODE (rhs_type
))))))
3681 if (dump_enabled_p ())
3682 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3683 "type conversion to/from bit-precision unsupported."
3688 /* Check the operands of the operation. */
3689 if (!vect_is_simple_use (op0
, vinfo
, &def_stmt
, &dt
[0], &vectype_in
))
3691 if (dump_enabled_p ())
3692 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3693 "use not simple.\n");
3696 if (op_type
== binary_op
)
3700 op1
= gimple_assign_rhs2 (stmt
);
3701 gcc_assert (code
== WIDEN_MULT_EXPR
|| code
== WIDEN_LSHIFT_EXPR
);
3702 /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
3704 if (CONSTANT_CLASS_P (op0
))
3705 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1], &vectype_in
);
3707 ok
= vect_is_simple_use (op1
, vinfo
, &def_stmt
, &dt
[1]);
3711 if (dump_enabled_p ())
3712 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3713 "use not simple.\n");
3718 /* If op0 is an external or constant defs use a vector type of
3719 the same size as the output vector type. */
3721 vectype_in
= get_same_sized_vectype (rhs_type
, vectype_out
);
3723 gcc_assert (vectype_in
);
3726 if (dump_enabled_p ())
3728 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3729 "no vectype for scalar type ");
3730 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3731 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3737 if (VECTOR_BOOLEAN_TYPE_P (vectype_out
)
3738 && !VECTOR_BOOLEAN_TYPE_P (vectype_in
))
3740 if (dump_enabled_p ())
3742 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3743 "can't convert between boolean and non "
3745 dump_generic_expr (MSG_MISSED_OPTIMIZATION
, TDF_SLIM
, rhs_type
);
3746 dump_printf (MSG_MISSED_OPTIMIZATION
, "\n");
3752 nunits_in
= TYPE_VECTOR_SUBPARTS (vectype_in
);
3753 nunits_out
= TYPE_VECTOR_SUBPARTS (vectype_out
);
3754 if (nunits_in
< nunits_out
)
3756 else if (nunits_out
== nunits_in
)
3761 /* Multiple types in SLP are handled by creating the appropriate number of
3762 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
3764 if (slp_node
|| PURE_SLP_STMT (stmt_info
))
3766 else if (modifier
== NARROW
)
3767 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_out
;
3769 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits_in
;
3771 /* Sanity check: make sure that at least one copy of the vectorized stmt
3772 needs to be generated. */
3773 gcc_assert (ncopies
>= 1);
3775 /* Supportable by target? */
3779 if (code
!= FIX_TRUNC_EXPR
&& code
!= FLOAT_EXPR
)
3781 if (supportable_convert_operation (code
, vectype_out
, vectype_in
,
3786 if (dump_enabled_p ())
3787 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
3788 "conversion not supported by target.\n");
3792 if (supportable_widening_operation (code
, stmt
, vectype_out
, vectype_in
,
3793 &code1
, &code2
, &multi_step_cvt
,
3796 /* Binary widening operation can only be supported directly by the
3798 gcc_assert (!(multi_step_cvt
&& op_type
== binary_op
));
3802 if (code
!= FLOAT_EXPR
3803 || (GET_MODE_SIZE (TYPE_MODE (lhs_type
))
3804 <= GET_MODE_SIZE (TYPE_MODE (rhs_type
))))
3807 rhs_mode
= TYPE_MODE (rhs_type
);
3808 fltsz
= GET_MODE_SIZE (TYPE_MODE (lhs_type
));
3809 for (rhs_mode
= GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type
));
3810 rhs_mode
!= VOIDmode
&& GET_MODE_SIZE (rhs_mode
) <= fltsz
;
3811 rhs_mode
= GET_MODE_2XWIDER_MODE (rhs_mode
))
3814 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
3815 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
3816 if (cvt_type
== NULL_TREE
)
3819 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
3821 if (!supportable_convert_operation (code
, vectype_out
,
3822 cvt_type
, &decl1
, &codecvt1
))
3825 else if (!supportable_widening_operation (code
, stmt
, vectype_out
,
3826 cvt_type
, &codecvt1
,
3827 &codecvt2
, &multi_step_cvt
,
3831 gcc_assert (multi_step_cvt
== 0);
3833 if (supportable_widening_operation (NOP_EXPR
, stmt
, cvt_type
,
3834 vectype_in
, &code1
, &code2
,
3835 &multi_step_cvt
, &interm_types
))
3839 if (rhs_mode
== VOIDmode
|| GET_MODE_SIZE (rhs_mode
) > fltsz
)
3842 if (GET_MODE_SIZE (rhs_mode
) == fltsz
)
3843 codecvt2
= ERROR_MARK
;
3847 interm_types
.safe_push (cvt_type
);
3848 cvt_type
= NULL_TREE
;
3853 gcc_assert (op_type
== unary_op
);
3854 if (supportable_narrowing_operation (code
, vectype_out
, vectype_in
,
3855 &code1
, &multi_step_cvt
,
3859 if (code
!= FIX_TRUNC_EXPR
3860 || (GET_MODE_SIZE (TYPE_MODE (lhs_type
))
3861 >= GET_MODE_SIZE (TYPE_MODE (rhs_type
))))
3864 rhs_mode
= TYPE_MODE (rhs_type
);
3866 = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode
), 0);
3867 cvt_type
= get_same_sized_vectype (cvt_type
, vectype_in
);
3868 if (cvt_type
== NULL_TREE
)
3870 if (!supportable_convert_operation (code
, cvt_type
, vectype_in
,
3873 if (supportable_narrowing_operation (NOP_EXPR
, vectype_out
, cvt_type
,
3874 &code1
, &multi_step_cvt
,
3883 if (!vec_stmt
) /* transformation not required. */
3885 if (dump_enabled_p ())
3886 dump_printf_loc (MSG_NOTE
, vect_location
,
3887 "=== vectorizable_conversion ===\n");
3888 if (code
== FIX_TRUNC_EXPR
|| code
== FLOAT_EXPR
)
3890 STMT_VINFO_TYPE (stmt_info
) = type_conversion_vec_info_type
;
3891 vect_model_simple_cost (stmt_info
, ncopies
, dt
, NULL
, NULL
);
3893 else if (modifier
== NARROW
)
3895 STMT_VINFO_TYPE (stmt_info
) = type_demotion_vec_info_type
;
3896 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3900 STMT_VINFO_TYPE (stmt_info
) = type_promotion_vec_info_type
;
3901 vect_model_promotion_demotion_cost (stmt_info
, dt
, multi_step_cvt
);
3903 interm_types
.release ();
3908 if (dump_enabled_p ())
3909 dump_printf_loc (MSG_NOTE
, vect_location
,
3910 "transform conversion. ncopies = %d.\n", ncopies
);
3912 if (op_type
== binary_op
)
3914 if (CONSTANT_CLASS_P (op0
))
3915 op0
= fold_convert (TREE_TYPE (op1
), op0
);
3916 else if (CONSTANT_CLASS_P (op1
))
3917 op1
= fold_convert (TREE_TYPE (op0
), op1
);
3920 /* In case of multi-step conversion, we first generate conversion operations
3921 to the intermediate types, and then from that types to the final one.
3922 We create vector destinations for the intermediate type (TYPES) received
3923 from supportable_*_operation, and store them in the correct order
3924 for future use in vect_create_vectorized_*_stmts (). */
3925 vec_dsts
.create (multi_step_cvt
+ 1);
3926 vec_dest
= vect_create_destination_var (scalar_dest
,
3927 (cvt_type
&& modifier
== WIDEN
)
3928 ? cvt_type
: vectype_out
);
3929 vec_dsts
.quick_push (vec_dest
);
3933 for (i
= interm_types
.length () - 1;
3934 interm_types
.iterate (i
, &intermediate_type
); i
--)
3936 vec_dest
= vect_create_destination_var (scalar_dest
,
3938 vec_dsts
.quick_push (vec_dest
);
3943 vec_dest
= vect_create_destination_var (scalar_dest
,
3945 ? vectype_out
: cvt_type
);
3949 if (modifier
== WIDEN
)
3951 vec_oprnds0
.create (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1);
3952 if (op_type
== binary_op
)
3953 vec_oprnds1
.create (1);
3955 else if (modifier
== NARROW
)
3956 vec_oprnds0
.create (
3957 2 * (multi_step_cvt
? vect_pow2 (multi_step_cvt
) : 1));
3959 else if (code
== WIDEN_LSHIFT_EXPR
)
3960 vec_oprnds1
.create (slp_node
->vec_stmts_size
);
3963 prev_stmt_info
= NULL
;
3967 for (j
= 0; j
< ncopies
; j
++)
3970 vect_get_vec_defs (op0
, NULL
, stmt
, &vec_oprnds0
, NULL
, slp_node
,
3973 vect_get_vec_defs_for_stmt_copy (dt
, &vec_oprnds0
, NULL
);
3975 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
3977 /* Arguments are ready, create the new vector stmt. */
3978 if (code1
== CALL_EXPR
)
3980 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
3981 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3982 gimple_call_set_lhs (new_stmt
, new_temp
);
3986 gcc_assert (TREE_CODE_LENGTH (code1
) == unary_op
);
3987 new_stmt
= gimple_build_assign (vec_dest
, code1
, vop0
);
3988 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
3989 gimple_assign_set_lhs (new_stmt
, new_temp
);
3992 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
3994 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
3997 if (!prev_stmt_info
)
3998 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
4000 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4001 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4008 /* In case the vectorization factor (VF) is bigger than the number
4009 of elements that we can fit in a vectype (nunits), we have to
4010 generate more than one vector stmt - i.e - we need to "unroll"
4011 the vector stmt by a factor VF/nunits. */
4012 for (j
= 0; j
< ncopies
; j
++)
4019 if (code
== WIDEN_LSHIFT_EXPR
)
4024 /* Store vec_oprnd1 for every vector stmt to be created
4025 for SLP_NODE. We check during the analysis that all
4026 the shift arguments are the same. */
4027 for (k
= 0; k
< slp_node
->vec_stmts_size
- 1; k
++)
4028 vec_oprnds1
.quick_push (vec_oprnd1
);
4030 vect_get_vec_defs (op0
, NULL_TREE
, stmt
, &vec_oprnds0
, NULL
,
4034 vect_get_vec_defs (op0
, op1
, stmt
, &vec_oprnds0
,
4035 &vec_oprnds1
, slp_node
, -1);
4039 vec_oprnd0
= vect_get_vec_def_for_operand (op0
, stmt
);
4040 vec_oprnds0
.quick_push (vec_oprnd0
);
4041 if (op_type
== binary_op
)
4043 if (code
== WIDEN_LSHIFT_EXPR
)
4046 vec_oprnd1
= vect_get_vec_def_for_operand (op1
, stmt
);
4047 vec_oprnds1
.quick_push (vec_oprnd1
);
4053 vec_oprnd0
= vect_get_vec_def_for_stmt_copy (dt
[0], vec_oprnd0
);
4054 vec_oprnds0
.truncate (0);
4055 vec_oprnds0
.quick_push (vec_oprnd0
);
4056 if (op_type
== binary_op
)
4058 if (code
== WIDEN_LSHIFT_EXPR
)
4061 vec_oprnd1
= vect_get_vec_def_for_stmt_copy (dt
[1],
4063 vec_oprnds1
.truncate (0);
4064 vec_oprnds1
.quick_push (vec_oprnd1
);
4068 /* Arguments are ready. Create the new vector stmts. */
4069 for (i
= multi_step_cvt
; i
>= 0; i
--)
4071 tree this_dest
= vec_dsts
[i
];
4072 enum tree_code c1
= code1
, c2
= code2
;
4073 if (i
== 0 && codecvt2
!= ERROR_MARK
)
4078 vect_create_vectorized_promotion_stmts (&vec_oprnds0
,
4080 stmt
, this_dest
, gsi
,
4081 c1
, c2
, decl1
, decl2
,
4085 FOR_EACH_VEC_ELT (vec_oprnds0
, i
, vop0
)
4089 if (codecvt1
== CALL_EXPR
)
4091 new_stmt
= gimple_build_call (decl1
, 1, vop0
);
4092 new_temp
= make_ssa_name (vec_dest
, new_stmt
);
4093 gimple_call_set_lhs (new_stmt
, new_temp
);
4097 gcc_assert (TREE_CODE_LENGTH (codecvt1
) == unary_op
);
4098 new_temp
= make_ssa_name (vec_dest
);
4099 new_stmt
= gimple_build_assign (new_temp
, codecvt1
,
4103 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
4106 new_stmt
= SSA_NAME_DEF_STMT (vop0
);
4109 SLP_TREE_VEC_STMTS (slp_node
).quick_push (new_stmt
);
4112 if (!prev_stmt_info
)
4113 STMT_VINFO_VEC_STMT (stmt_info
) = new_stmt
;
4115 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
4116 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
4121 *vec_stmt
= STMT_VINFO_VEC_STMT (stmt_info
);
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
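/* For example, a char -> int element conversion is a two-step promotion
   (char -> short -> int), handled by the WIDEN case above with
   multi_step_cvt intermediate types: each step doubles the element width,
   so each step yields two vector defs per input vector.  The NARROW case
   works in the opposite direction, pairwise packing vector defs until the
   narrow type is reached.  */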
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
	 a simple assignment in case their vectypes are same
	 boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
	  || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
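/* For example, a loop statement like

       unsigned int *b; int *a;
       ...
       b[i] = (unsigned int) a[i];

   is handled here: the conversion changes neither the element count nor
   the vector size, so the vectorized statement is a plain vector copy,
   with the operand wrapped in a VIEW_CONVERT_EXPR to the destination
   vector type when the codes above require it.  */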
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
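/* For example, the scalar optab matches loops where every element is
   shifted by one invariant amount, while the vector optab is needed for
   per-element counts:

       for (i = 0; i < n; i++)        for (i = 0; i < n; i++)
	 a[i] = b[i] << k;              a[i] = b[i] << c[i];

   A target may provide either form, both, or neither, which is why both
   optabs are queried above.  */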
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "operand mode requires invariant argument.\n");
      return false;
    }

  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unusable type for last operand in"
			     " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
	  && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector/scalar shift/rotate found.\n");
	}
      else
	{
	  optab = optab_for_tree_code (code, vectype, optab_vector);
	  if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
	    {
	      scalar_shift_arg = false;

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vector/vector shift/rotate found.\n");

	      /* Unlike the other binary operators, shifts/rotates have
		 the rhs being int, instead of the same type as the lhs,
		 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
	      if (dt[1] == vect_constant_def)
		op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
					 "unusable type for last operand in"
					 " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
	    }
	}
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (vf < vect_min_worthwhile_factor (code)
	      && !vec_stmt))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (scalar_shift_arg)
	    {
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or loop
		 invariant op1 directly, without extending it to vector mode
		 first.  */
	      optab_op2_mode = insn_data[icode].operand[2].mode;
	      if (!VECTOR_MODE_P (optab_op2_mode))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "operand 1 using scalar mode.\n");
		  vec_oprnd1 = op1;
		  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
		  vec_oprnds1.quick_push (vec_oprnd1);
		  if (slp_node)
		    {
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.
			 TODO: Allow different constants for different vector
			 stmts generated for an SLP instance.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }

	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kind of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
	  if (vec_oprnd1)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	}
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = vec_oprnds1[i];
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary, binary or ternary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "num. args = %d (not unary/binary/ternary op).\n",
			 op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
	  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
      if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
	{
	  if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not supported operation on bool value.\n");
	      return false;
	    }
	  vectype = vectype_out;
	}
      else
	vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (op0));
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
				RELATED_STMT	VEC_STMT
	S1:	x = memref	-		-
	S2:	z = x + 1	-		-

     step 1: vectorize stmt S1 (done in vectorizable_load.  See more details
	     there):
				RELATED_STMT	VEC_STMT
	VS1_0:	vx0 = memref0	VS1_1		-
	VS1_1:	vx1 = memref1	VS1_2		-
	VS1_2:	vx2 = memref2	VS1_3		-
	VS1_3:	vx3 = memref3	-		-
	S1:	x = load	-		VS1_0
	S2:	z = x + 1	-		-

     step2: vectorize stmt S2 (done here):
	To vectorize stmt S2 we first need to find the relevant vector
	def for the first operand 'x'.  This is, as usual, obtained from
	the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
	that defines 'x' (S1).  This way we find the stmt VS1_0, and the
	relevant vector def 'vx0'.  Having found 'vx0' we can generate
	the vector stmt VS2_0, and as usual, record it in the
	STMT_VINFO_VEC_STMT of stmt S2.
	When creating the second copy (VS2_1), we obtain the relevant vector
	def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
	stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
	vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
	pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
	Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
	chain of stmts and pointers:
				RELATED_STMT	VEC_STMT
	VS1_0:	vx0 = memref0	VS1_1		-
	VS1_1:	vx1 = memref1	VS1_2		-
	VS1_2:	vx2 = memref2	VS1_3		-
	VS1_3:	vx3 = memref3	-		-
	S1:	x = load	-		VS1_0
	VS2_0:	vz0 = vx0 + v1	VS2_1		-
	VS2_1:	vz1 = vx1 + v1	VS2_2		-
	VS2_2:	vz2 = vx2 + v1	VS2_3		-
	VS2_3:	vz3 = vx3 + v1	-		-
	S2:	z = x + 1	-		VS2_0  */
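  /* For example, with 4 ints per vector (nunits = 4) and VF = 16,
     ncopies = VF / nunits = 4, so the loop below emits four vector
     statements per scalar statement, chained exactly as shown above.  */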
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  if (op_type == ternary_op)
	    {
	      vec_oprnds2.create (1);
	      vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
								    stmt));
	    }
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
	{
	  DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
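/* E.g. for a 4-element vector the selector built above is { 3, 2, 1, 0 },
   the end-for-end reversal; NULL is returned when can_vec_perm_p reports
   that the target cannot perform that permutation.  */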
/* Function vectorizable_store.

   Check if STMT defines a non-scalar data-ref (array/pointer/structure)
   that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    return false;

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
	tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
			      ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
			      size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}
      if (negative)
	{
	  gcc_assert (!grouped_store);
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (dt != vect_constant_def
	      && dt != vect_external_def
	      && !perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported.\n");
	      return false;
	    }
	}
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
	  && !PURE_SLP_STMT (stmt_info)
	  && !STMT_VINFO_STRIDED_P (stmt_info))
	{
	  if (vect_store_lanes_supported (vectype, group_size))
	    store_lanes_p = true;
	  else if (!vect_grouped_store_supported (vectype, group_size))
	    return false;
	}

      if (STMT_VINFO_STRIDED_P (stmt_info)
	  && (slp || PURE_SLP_STMT (stmt_info))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group store\n");
	  return false;
	}

      if (first_stmt == stmt)
	{
	  /* STMT is the leader of the group. Check the operands of all the
	     stmts of the group.  */
	  next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
	  while (next_stmt)
	    {
	      gcc_assert (gimple_assign_single_p (next_stmt));
	      op = gimple_assign_rhs1 (next_stmt);
	      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "use not simple.\n");
		  return false;
		}
	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	    }
	}
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
						&scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
			       &scatter_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "scatter index use not simple.");
	  return false;
	}
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
			       NULL, NULL, NULL);
      return true;
    }

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      gimple *new_stmt;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
	modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < (unsigned int) nunits; ++i)
	    sel[i] = i | scatter_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (scatter_off, stmt);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
					      stmt, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
			  == TYPE_VECTOR_SUBPARTS (srctype));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }
  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
	{
	  grouped_store = false;
	  /* VEC_NUM is the number of vect stmts to be created for this
	     group.  */
	  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
	  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  op = gimple_assign_rhs1 (first_stmt);
	}
      else
	/* VEC_NUM is the number of vect stmts to be created for this
	   group.  */
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform store. ncopies = %d\n", ncopies);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
	= fold_build_pointer_plus
	    (unshare_expr (DR_BASE_ADDRESS (first_dr)),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
	 */

      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
	{
	  nstores = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (elem_type, group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_size = 1;
	}

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep),
					   ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt, incr, gsi);
	      running_off = newoff;
	    }
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from gimple_assign_rhs1(stmt),
		 and first_stmt == stmt.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
					 slp_node, -1);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      gcc_assert (gimple_assign_single_p (next_stmt));
		      op = gimple_assign_rhs1 (next_stmt);
		      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    {
		      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
		      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
		    }
		}

	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  newref = build2 (MEM_REF, ltype,
				   running_off, alias_off);

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  vect_finish_stmt_generation (stmt, assign, gsi);

		  newoff = copy_ssa_name (running_off, NULL);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			  = *vec_stmt = assign;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
		      prev_stmt_info = vinfo_for_stmt (assign);
		    }
		}
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
      return true;
    }
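  /* So for nstores = 4 the strided scheme above stores the four
     components of each vector def separately:

	 array[j]            = vectemp[0];
	 array[j + stride]   = vectemp[1];
	 array[j + 2*stride] = vectemp[2];
	 array[j + 3*stride] = vectemp[3];

     each component store being a BIT_FIELD_REF extract followed by a
     POINTER_PLUS_EXPR bump of RUNNING_OFF by STRIDE_STEP.  */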
  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

	S1:  &base + 2 = x2
	S2:  &base = x0
	S3:  &base + 1 = x1
	S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

	VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      gimple *new_stmt;

      if (j == 0)
	{
	  if (slp)
	    {
	      /* Get vectorized arguments for SLP_NODE.  */
	      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
				 NULL, slp_node, -1);

	      vec_oprnd = vec_oprnds[0];
	    }
	  else
	    {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      next_stmt = first_stmt;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		     there is no interleaving, GROUP_SIZE is 1, and only one
		     iteration of the loop will be executed.  */
		  gcc_assert (next_stmt
			      && gimple_assign_single_p (next_stmt));
		  op = gimple_assign_rhs1 (next_stmt);

		  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
		}
	    }

	  /* We should have caught mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p);
	  gcc_assert (bb_vinfo || !inv_p);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vect_is_simple_use (op, vinfo, &def_stmt, &dt);
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset,
				 TYPE_SIZE_UNIT (aggr_type));
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   TYPE_SIZE_UNIT (aggr_type));
	}

      if (store_lanes_p)
	{
	  tree vec_array;

	  /* Combine all the vectors into an array.  */
	  vec_array = create_vector_array (vectype, vec_num);
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
	    }

	  /* Emit:
	       MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
	  gimple_call_set_lhs (new_stmt, data_ref);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	}
      else
	{
	  new_stmt = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
					&result_chain);
	    }

	  next_stmt = first_stmt;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
				      dataref_ptr,
				      dataref_offset
				      ? dataref_offset
				      : build_int_cst (reference_alias_ptr_type
						       (DR_REF (first_dr)), 0));
	      align = TYPE_ALIGN_UNIT (vectype);
	      if (aligned_access_p (first_dr))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr) == -1)
		{
		  if (DR_VECT_AUX (first_dr)->base_element_aligned)
		    align = TYPE_ALIGN_UNIT (elem_type);
		  else
		    align = get_object_alignment (DR_REF (first_dr))
			    / BITS_PER_UNIT;
		  misalign = 0;
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  align * BITS_PER_UNIT);
		}
	      else
		{
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  TYPE_ALIGN (elem_type));
		  misalign = DR_MISALIGNMENT (first_dr);
		}
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (negative
		  && dt != vect_constant_def
		  && dt != vect_external_def)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest
		    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
						   vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (slp)
		continue;

	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      if (!next_stmt)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
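/* As a concrete instance of the grouped-store path above: for the group

       a[2*i]   = x;
       a[2*i+1] = y;

   the vectorized defs vx and vy are collected in DR_CHAIN, and
   vect_permute_store_chain interleaves them with VEC_PERM_EXPRs into
   { x0, y0, x1, y1, ... } vectors before the contiguous vector stores
   are emitted.  */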
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given vector variables X and Y that were generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loop's preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
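/* This is used for loads that are invariant within LOOP: e.g. when the
   address computation feeding such a load is defined inside the loop but
   only from loop-invariant values, those definitions are moved to the
   preheader so the load itself can then be hoisted as well.  */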
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure)
   that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
6161 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6162 slp_tree slp_node
, slp_instance slp_node_instance
)
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  tree new_temp;
  machine_mode mode;
  gimple *new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int i, j, group_size = -1, group_gap_adj;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree byte_offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gphi *phi = NULL;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  bool load_lanes_p = false;
  gimple *first_stmt;
  gimple *first_stmt_for_drptr = NULL;
  gimple *def_stmt;
  bool inv_p;
  bool negative = false;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;
  tree aggr_type;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;
  vec_info *vinfo = stmt_info->vinfo;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load? */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != BIT_FIELD_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF
      && TREE_CODE_CLASS (code) != tcc_declaration)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;
  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
          > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot perform implicit CSE when unrolling "
                         "with negative dependence distance\n");
      return false;
    }
  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Aligned load, but unsupported type.\n");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop
                  && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

      /* If this is single-element interleaving with an element distance
         that leaves unused vector loads around punt - we at least create
         very sub-optimal code in that case (and blow up memory).  */
      bool force_peeling = false;
      if (first_stmt == stmt
          && !GROUP_NEXT_ELEMENT (stmt_info))
        {
          if (GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "single-element interleaving not supported "
                                 "for not adjacent vector loads\n");
              return false;
            }

          /* Single-element interleaving requires peeling for gaps.  */
          force_peeling = true;
        }

      /* If there is a gap in the end of the group or the group size cannot
         be made a multiple of the vector element count then we access excess
         elements in the last iteration and thus need to peel that off.  */
      if (loop_vinfo
          && ! STMT_VINFO_STRIDED_P (stmt_info)
          && (force_peeling
              || GROUP_GAP (vinfo_for_stmt (first_stmt)) != 0
              || (!slp && vf % GROUP_SIZE (vinfo_for_stmt (first_stmt)) != 0)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "Data access with gaps requires scalar "
                             "epilogue loop\n");
          if (loop->inner)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "Peeling for outer loop is not supported\n");
              return false;
            }

          LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
        }

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        slp_perm = true;

      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* ??? The following is overly pessimistic (as well as the loop
         case above) in the case we can statically determine the excess
         elements loaded are within the bounds of a decl that is accessed.
         Likewise for BB vectorizations using masked loads is a possibility.  */
      if (bb_vinfo && slp_perm && group_size % nunits != 0)
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "BB vectorization with gaps at the end of a load "
                           "is not supported\n");
          return false;
        }

      if (!slp
          && !PURE_SLP_STMT (stmt_info)
          && !STMT_VINFO_STRIDED_P (stmt_info))
        {
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_grouped_load_supported (vectype, group_size))
            return false;
        }

      /* Invalidate assumptions made by dependence analysis when vectorization
         on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
          && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
          && ((unsigned)LOOP_VINFO_VECT_FACTOR (loop_vinfo)
              > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "cannot perform implicit CSE when performing "
                             "group loads with negative dependence distance\n");
          return false;
        }

      /* Similarly when the stmt is a load that is both part of a SLP
         instance and a loop vectorized stmt via the same-dr mechanism
         we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
          && (STMT_SLP_TYPE (stmt_info)
              != STMT_SLP_TYPE (vinfo_for_stmt
                                  (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "conflicting SLP types for CSEd load\n");
          return false;
        }
    }
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
                                               &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
                               &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.\n");
          return false;
        }
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      if ((grouped_load
           && (slp || PURE_SLP_STMT (stmt_info)))
          && (group_size > nunits
              || nunits % group_size != 0))
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unhandled strided group load\n");
          return false;
        }
    }
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
                                       ? STMT_VINFO_DR_STEP (stmt_info)
                                       : DR_STEP (dr),
                                       size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }

      if (negative)
        {
          if (grouped_load)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step for group load not supported"
                                 "\n");
              return false;
            }
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (!perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported."
                                 "\n");
              return false;
            }
        }
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
                              NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */
  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
          ncopies *= 2;
        }
      else
        gcc_unreachable ();
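
      /* For illustration: in the NARROW case with NUNITS == 8 and
         GATHER_OFF_NUNITS == 4 the selector is {0, 1, 2, 3, 8, 9, 10, 11},
         i.e. it concatenates the two half-width gather results.  In the
         WIDEN case with NUNITS == 4 and GATHER_OFF_NUNITS == 8 the
         selector is {4, 5, 6, 7, 4, 5, 6, 7}, which replicates the high
         half of the offset vector for the second gather of each pair.  */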
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
        first_dr = STMT_VINFO_DATA_REF
            (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
        first_dr = dr;

      stride_base
        = fold_build_pointer_plus
            (DR_BASE_ADDRESS (first_dr),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (DR_OFFSET (first_dr)),
                         convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
             ...  */

      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
                            build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
                                          &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type
                                   (DR_REF (first_dr)), 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
        {
          nloads = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (TREE_TYPE (vectype), group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            {
              ncopies = (group_size * vf + nunits - 1) / nunits;
              dr_chain.create (ncopies);
            }
          else
            ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
        }
      for (j = 0; j < ncopies; j++)
        {
          tree vec_inv;

          if (nloads > 1)
            {
              vec_alloc (v, nloads);
              for (i = 0; i < nloads; i++)
                {
                  tree newref, newoff;

                  newref = build2 (MEM_REF, ltype, running_off, alias_off);
                  newref = force_gimple_operand_gsi (gsi, newref, true,
                                                     NULL_TREE, true,
                                                     GSI_SAME_STMT);
                  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
                  newoff = copy_ssa_name (running_off);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                }

              vec_inv = build_constructor (vectype, v);
              new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
            }
          else
            {
              new_stmt = gimple_build_assign (make_ssa_name (ltype),
                                              build2 (MEM_REF, ltype,
                                                      running_off, alias_off));
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              tree newoff = copy_ssa_name (running_off);
              gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
              vect_finish_stmt_generation (stmt, incr, gsi);

              running_off = newoff;
            }

          if (slp)
            {
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
              else
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (slp_perm)
        vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                      slp_node_instance, false);
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
         the data ref pointer on.  */
      if (bb_vinfo)
        first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            vec_num = (group_size * vf + nunits - 1) / nunits;
          else
            vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_gap_adj = vf * group_size - nunits * vec_num;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  x2 = &base + 2
        S2:  x0 = &base
        S3:  x1 = &base + 1
        S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

        VS1: vx0 = &base
        VS2: vx1 = &base + vec_size*1
        VS3: vx3 = &base + vec_size*2
        VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
          ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

         p = initial_addr;
         indx = 0;
         loop {
           p = p + indx * vectype_size;
           vec_dest = *(p);
           indx = indx + 1;
         }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

         msq_init = *(floor(p1))
         p2 = initial_addr + VS - 1;
         realignment_token = call target_builtin;
         indx = 0;
         loop {
           p2 = p2 + indx * vectype_size
           lsq = *(floor(p2))
           vec_dest = realign_load (msq, lsq, realignment_token)
           indx = indx + 1;
           msq = lsq;
         }   */

  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */
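
  /* For illustration of the realignment scheme: with 16-byte vectors
     and P misaligned by 4 bytes, msq = *(P & -16) holds bytes
     [P-4, P+12) and lsq = *((P + 15) & -16) holds bytes [P+12, P+28);
     realign_load shifts their concatenation so that vec_dest receives
     exactly bytes [P, P+16).  */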
  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                                (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else if (first_stmt_for_drptr
                   && first_stmt != first_stmt_for_drptr)
            {
              dataref_ptr
                = vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
                                            at_loop, offset, &dummy, gsi,
                                            &ptr_incr, simd_lane_access_p,
                                            &inv_p, byte_offset);
              /* Adjust the pointer by the difference to first_stmt.  */
              data_reference_p ptrdr
                = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
              tree diff = fold_convert (sizetype,
                                        size_binop (MINUS_EXPR,
                                                    DR_INIT (first_dr),
                                                    DR_INIT (ptrdr)));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, diff);
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p,
                                          byte_offset);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);
      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);
              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = fold_build2 (MEM_REF, vectype, dataref_ptr,
                                     dataref_offset
                                     ? dataref_offset
                                     : build_int_cst (reference_alias_ptr_type
                                                        (DR_REF (first_dr)),
                                                      0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        if (DR_VECT_AUX (first_dr)->base_element_aligned)
                          align = TYPE_ALIGN_UNIT (elem_type);
                        else
                          align = (get_object_alignment (DR_REF (first_dr))
                                   / BITS_PER_UNIT);
                        misalign = 0;
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                align * BITS_PER_UNIT);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE
                        && TREE_CODE (dataref_ptr) == SSA_NAME)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;

                    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      ptr = copy_ssa_name (dataref_ptr);
                    else
                      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
                    new_stmt = gimple_build_assign
                                 (ptr, BIT_AND_EXPR, dataref_ptr,
                                  build_int_cst
                                    (TREE_TYPE (dataref_ptr),
                                     -(HOST_WIDE_INT)
                                       TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs,
                                       TYPE_SIZE_UNIT (elem_type));
                    bump = size_binop (MINUS_EXPR, bump, size_one_node);
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign
                                 (NULL_TREE, BIT_AND_EXPR, ptr,
                                  build_int_cst
                                    (TREE_TYPE (ptr),
                                     -(HOST_WIDE_INT)
                                       TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  if (TREE_CODE (dataref_ptr) == SSA_NAME)
                    new_temp = copy_ssa_name (dataref_ptr);
                  else
                    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
                  new_stmt = gimple_build_assign
                               (new_temp, BIT_AND_EXPR, dataref_ptr,
                                build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT)
                                     TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest,
                                                          vectype);
                  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
                                                  msq, lsq, realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme
                      == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }

              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }
                      tree tem = copy_ssa_name (scalar_dest);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                      set_vinfo_for_stmt (new_stmt,
                                          new_stmt_vec_info (new_stmt, vinfo));
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                      new_stmt = SSA_NAME_DEF_STMT (new_temp);
                    }
                }

              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }

              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          /* Bump the vector pointer to account for a gap or for excess
             elements loaded for a permuted SLP load.  */
          if (group_gap_adj != 0)
            {
              bool ovf;
              tree bump
                = wide_int_to_tree (sizetype,
                                    wi::smul (TYPE_SIZE_UNIT (elem_type),
                                              group_gap_adj, &ovf));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }

      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (!load_lanes_p)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
                               &dt, comp_vectype)
          || !*comp_vectype
          || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
        return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
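
/* For example, both forms

     x = a < b ? c : d;            (embedded comparison)
     m = a < b;  x = m ? c : d;    (mask operand of boolean type)

   have a "simple" condition as long as each operand is an SSA name
   with a vectorizable definition or an invariant constant.  */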
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
        return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
          && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
               && reduc_def))
        return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "value used after loop.\n");
          return false;
        }
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt,
                           &vectype1))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt,
                           &vectype2))
    return false;

  if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
    return false;

  if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
    return false;

  masked = !COMPARISON_CLASS_P (cond_expr);
  vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              if (masked)
                ops.safe_push (cond_expr);
              else
                {
                  ops.safe_push (TREE_OPERAND (cond_expr, 0));
                  ops.safe_push (TREE_OPERAND (cond_expr, 1));
                }
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              if (!masked)
                vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();

              ops.release ();
              vec_defs.release ();
            }
          else
            {
              gimple *gtemp;
              if (masked)
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr, stmt,
                                                    comp_vectype);
                  vect_is_simple_use (cond_expr, stmt_info->vinfo,
                                      &gtemp, &dts[0]);
                }
              else
                {
                  vec_cond_lhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
                                      loop_vinfo, &gtemp, &dts[0]);

                  vec_cond_rhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
                                      loop_vinfo, &gtemp, &dts[1]);
                }
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt);
                  vect_is_simple_use (then_clause, loop_vinfo,
                                      &gtemp, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt);
                  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs
            = vect_get_vec_def_for_stmt_copy (dts[0],
                                              vec_oprnds0.pop ());
          if (!masked)
            vec_cond_rhs
              = vect_get_vec_def_for_stmt_copy (dts[1],
                                                vec_oprnds1.pop ());

          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          if (!masked)
            vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          if (masked)
            vec_compare = vec_cond_lhs;
          else
            {
              vec_cond_rhs = vec_oprnds1[i];
              vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
                                    vec_cond_lhs, vec_cond_rhs);
            }
          new_temp = make_ssa_name (vec_dest);
          new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
                                          vec_compare, vec_then_clause,
                                          vec_else_clause);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
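
/* For illustration: with 4-element vectors the scalar statement

     x = a < b ? c : d;

   is replaced by

     vx = VEC_COND_EXPR <va < vb, vc, vd>;

   which selects each element of VC or VD according to the
   per-element result of the comparison.  */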
/* vectorizable_comparison.

   Check if STMT is comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, tree reduc_def,
                         slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned nunits;
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
                           &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
                           &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
      if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
        return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
      return expand_vec_cmp_expr_p (vectype, mask_type);
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 2> ops;
              auto_vec<vec<tree>, 2> vec_defs;

              ops.safe_push (rhs1);
              ops.safe_push (rhs2);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();
            }
          else
            {
              vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
              vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
            }
        }
      else
        {
          vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
                                                     vec_oprnds0.pop ());
          vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
                                                     vec_oprnds1.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_rhs1);
          vec_oprnds1.quick_push (vec_rhs2);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
        {
          vec_rhs2 = vec_oprnds1[i];

          new_temp = make_ssa_name (mask);
          new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
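
/* For illustration: a scalar boolean definition

     b = x < y;

   is vectorized as a single vector comparison

     mask_v = vx < vy;

   whose result has the target's boolean-vector (mask) type for
   VECTYPE rather than the data vector type itself.  */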
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized. In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node))
                return false;
            }
        }
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
    case vect_internal_def:
      break;

    case vect_reduction_def:
    case vect_nested_cycle:
      gcc_assert (!bb_vinfo
                  && (relevance == vect_used_in_outer
                      || relevance == vect_used_in_outer_by_reduction
                      || relevance == vect_used_by_reduction
                      || relevance == vect_unused_in_scope));
      break;

    case vect_induction_def:
    case vect_constant_def:
    case vect_external_def:
    case vect_unknown_def_type:
    default:
      gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not SLPed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
          || vectorizable_conversion (stmt, NULL, NULL, node)
          || vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_call (stmt, NULL, NULL, node)
          || vectorizable_store (stmt, NULL, NULL, node)
          || vectorizable_reduction (stmt, NULL, NULL, node)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
          || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
              || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and there vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
                && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
             vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
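
/* Editor's note: a condensed sketch (hypothetical) of the lifecycle of this
   info during one run of the vectorizer:

     init_stmt_vec_info_vec ();
     ...
     set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, loop_vinfo));
     ...
     stmt_vec_info info = vinfo_for_stmt (stmt);
     ...
     free_stmt_vec_info_vec ();

   set_vinfo_for_stmt records the info in stmt_vec_info_vec, indexed by the
   statement's UID, which is how vinfo_for_stmt finds it again.  */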
/* Create the vector holding stmt_vec_info structs.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free the vector holding stmt_vec_info structs.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (lhs && TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
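
/* Editor's note (inferred from the code above, not from the original
   sources): pattern statements are vectorizer-internal gimple that were
   never linked into the function's statement sequence, which is why they
   are dismantled by hand here, clearing their block, releasing their SSA
   names and recursing through the pattern def sequence, rather than being
   removed with gsi_remove like ordinary statements.  */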
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
          || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
                                                  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
           && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
                                                  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
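
/* Editor's note (illustrative): on a target whose preferred SIMD mode for
   QImode is V16QImode, calling this with SCALAR_TYPE = char and SIZE = 0
   yields the 16-element type "vector(16) char", while SIZE = 32 instead
   requests a 32-byte vector mode (V32QImode where available).  */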
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
                                                  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
                                  current_vector_size);
}
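
/* Editor's note (illustrative): comparing vectors of int elementwise
   produces one boolean result per lane.  On most targets the mask type
   built here is an integer vector of the same width (0/-1 per lane); on
   targets with scalar mask registers (AVX-512 style),
   build_truth_vector_type instead yields a boolean vector whose mode is a
   small scalar integer, one bit per lane.  */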
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
           (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
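
/* Editor's note (illustrative): with VECTOR_TYPE a 16-byte
   "vector(4) float", get_same_sized_vectype (integer_type_node, that type)
   requests the 16-byte integer vector "vector(4) int", the shape needed
   when a single statement mixes float and int operands of equal count.  */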
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
        {
        case vect_uninitialized_def:
          dump_printf (MSG_NOTE, "uninitialized\n");
          break;
        case vect_constant_def:
          dump_printf (MSG_NOTE, "constant\n");
          break;
        case vect_external_def:
          dump_printf (MSG_NOTE, "external\n");
          break;
        case vect_internal_def:
          dump_printf (MSG_NOTE, "internal\n");
          break;
        case vect_induction_def:
          dump_printf (MSG_NOTE, "induction\n");
          break;
        case vect_reduction_def:
          dump_printf (MSG_NOTE, "reduction\n");
          break;
        case vect_double_reduction_def:
          dump_printf (MSG_NOTE, "double reduction\n");
          break;
        case vect_nested_cycle:
          dump_printf (MSG_NOTE, "nested cycle\n");
          break;
        case vect_unknown_def_type:
          dump_printf (MSG_NOTE, "unknown\n");
          break;
        }
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible to compute the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
                    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && !STMT_VINFO_RELEVANT (stmt_info)
          && !STMT_VINFO_LIVE_P (stmt_info))
        stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
           || *dt == vect_constant_def
           || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
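
/* Editor's note: a sketch (hypothetical names) of the vectype-returning
   overload:

     tree op_vectype;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &op_vectype))
       return false;
     if (!op_vectype)
       /* Constant or external def: the caller picks a vector type.  */
       op_vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
*/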
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like char->short->int - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
                                tree vectype_out, tree vectype_in,
                                enum tree_code *code1, enum tree_code *code2,
                                int *multi_step_cvt,
                                vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
         two vectors (because the widened results do not fit into one vector).
         The generated vector results would normally be expected to be
         generated in the same order as in the original scalar computation,
         i.e. if 8 results are generated in each vector iteration, they are
         to be organized as follows:
                vect1: [res1,res2,res3,res4],
                vect2: [res5,res6,res7,res8].

         However, in the special case that the result of the widening
         operation is used in a reduction computation only, the order doesn't
         matter (because when vectorizing a reduction we change the order of
         the computation).  Some targets can take advantage of this and
         generate more efficient code.  For example, targets like Altivec,
         that support widen_mult using a sequence of {mult_even,mult_odd}
         generate the following vectors:
                vect1: [res1,res3,res5,res7],
                vect2: [res2,res4,res6,res8].

         When vectorizing outer-loops, we execute the inner-loop sequentially
         (each vectorized inner-loop iteration contributes to VF outer-loop
         iterations in parallel).  We therefore don't allow changing the
         order of the computation in the inner-loop during outer-loop
         vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
         widen and then contract again, e.g. (short)((int)x * y >> 8).
         Normally, pack_trunc performs an even/odd permute, whereas the
         repack from an even/odd expansion would be an interleave, which
         would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
         on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
         are properly set up for the caller.  If we fail, we'll continue with
         a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
          && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
          && !nested_in_vect_loop_p (vect_loop, stmt)
          && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
                                             stmt, vectype_out, vectype_in,
                                             code1, code2, multi_step_cvt,
                                             interm_types))
        {
          /* Elements in a vector with vect_used_by_reduction property cannot
             be reordered if the use chain with this property does not have
             the same operation.  One such example is s += a * b, where
             elements in a and b cannot be reordered.  Here we check if the
             vector defined by STMT is only directly used in the reduction
             statement.  */
          tree lhs = gimple_assign_lhs (stmt);
          use_operand_p dummy;
          gimple *use_stmt;
          stmt_vec_info use_stmt_info = NULL;
          if (single_imm_use (lhs, &dummy, &use_stmt)
              && (use_stmt_info = vinfo_for_stmt (use_stmt))
              && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
            return true;
        }
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
         VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
         computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check on the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || (TYPE_VECTOR_SUBPARTS (vectype) / 2
                == TYPE_VECTOR_SUBPARTS (wide_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type
            = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
                                       current_vector_size);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode,
                                            TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
          || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
          || insn_data[icode2].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (optab3, intermediate_mode))
              == CODE_FOR_nothing)
          || ((icode2 = optab_handler (optab4, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
          && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
                    == TYPE_VECTOR_SUBPARTS (wide_vectype)));

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
     vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
     case of multi-step conversion (like int->short->char - in that case
     MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
     narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
                                 tree vectype_out, tree vectype_in,
                                 enum tree_code *code1, int *multi_step_cvt,
                                 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
         tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    /* For scalar masks we may have different boolean
       vector types having the same QImode.  Thus we
       add an additional check on the number of elements.  */
    return (!VECTOR_BOOLEAN_TYPE_P (vectype)
            || (TYPE_VECTOR_SUBPARTS (vectype) * 2
                == TYPE_VECTOR_SUBPARTS (narrow_vectype)));

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
        = lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
        = optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
          && (icode2 = optab_handler (interm_optab, vec_mode))
             != CODE_FOR_nothing
          && insn_data[icode1].operand[0].mode
             == insn_data[icode2].operand[0].mode)
        {
          uns = false;
          optab1 = interm_optab;
          icode1 = icode2;
        }
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the demotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
        {
          intermediate_type
            = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
                                       current_vector_size);
          if (intermediate_mode != TYPE_MODE (intermediate_type))
            return false;
        }
      else
        intermediate_type
          = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
        = optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
                               optab_default);
      if (!interm_optab
          || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
          || insn_data[icode1].operand[0].mode != intermediate_mode
          || ((icode1 = optab_handler (interm_optab, intermediate_mode))
              == CODE_FOR_nothing))
        break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
        return (!VECTOR_BOOLEAN_TYPE_P (vectype)
                || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
                    == TYPE_VECTOR_SUBPARTS (narrow_vectype)));

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}