/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2016 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */

unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
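
/* Usage sketch (illustrative, hypothetical caller): during analysis,
   callers typically batch costs into a vector and only hand them to the
   target model once the whole body has been examined, e.g.

     stmt_vector_for_cost body_costs = vNULL;
     record_stmt_cost (&body_costs, 1, vector_stmt, stmt_info, 0, vect_body);
     ... examine more stmts ...
     body_costs.release ();

   whereas passing a NULL cost vector routes the cost straight to
   add_stmt_cost on the target's cost data.  */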
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
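
/* Illustrative example (names invented): for an array of vectors filled by
   a load-lanes instruction and a scalar destination "x",

     read_vector_array (stmt, gsi, x, vect_array, 1)

   emits roughly "vect_x.0 = vect_array[1];" at GSI and returns the
   SSA name vect_x.0.  */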
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "mark relevant %d, live %d: ", relevant, live_p);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      /* This is the last stmt in a sequence that was detected as a
	 pattern that can potentially be vectorized.  Don't mark the stmt
	 as relevant/live because it's not going to be vectorized.
	 Instead mark the pattern-stmt that replaces it.  */

      pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "last stmt in pattern. don't mark"
			 " relevant/live.\n");
      stmt_info = vinfo_for_stmt (pattern_stmt);
      gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
      save_relevant = STMT_VINFO_RELEVANT (stmt_info);
      save_live_p = STMT_VINFO_LIVE_P (stmt_info);
      stmt = pattern_stmt;
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - control stmts in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */

static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
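
/* Two illustrative cases: in "for (i...) a[i] = b[i] + 1;" the store has a
   vdef, so *relevant becomes vect_used_in_scope; in "for (i...) s += a[i];"
   with s read after the loop, the def reaches a loop-closed exit phi, so
   *live_p becomes true.  */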
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref. FORNOW this means that its of one of
     the following forms:
     -1- array_ref = var
     -2- var = array_ref
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
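
/* Example (illustrative): for the store "a[i_1] = x_2", the use i_1 only
   feeds the array index, so this function returns false for it, while for
   the stored value x_2 it returns true.  */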
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT cause it had already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */

static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p);
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized. For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */

bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT. For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	   live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	   relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
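
/* Illustrative walk-through: for "for (i...) a[i] = b[i] * c;" the store is
   seeded into the worklist because of its vdef; popping it, process_use
   marks the def of "b[i] * c" vect_used_in_scope, while the uses that only
   compute the address of a[i] are skipped by
   exist_non_indexing_operands_for_use_p.  */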
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */

void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
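
/* Worked example (illustrative): for ncopies == 4 and one vect_constant_def
   operand, this records 4 vector_stmt entries in the body cost vector and
   1 vector_stmt entry in the prologue cost vector; the reported totals are
   in units of the target's cost for those entries.  */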
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */

static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
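
/* Worked example (illustrative): for a two-step demotion (pwr == 1) the
   loop above records vect_pow2 (0) + vect_pow2 (1) == 3 vec_promote_demote
   operations, matching the per-step instruction doubling described in the
   comment before the function.  */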
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses a high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar stores plus extracting the elements.  */
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_store, stmt_info, 0, vect_body);
    }
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
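
/* Worked example (illustrative): an interleaved store group with
   group_size == 4 and ncopies == 1, provided by permute-and-store, records
   ncopies * ceil_log2 (4) * 4 == 8 vec_perm operations in addition to the
   stores themselves.  */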
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses an even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */

	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */

tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  /* Scalar boolean value should be transformed into
	     all zeros or all ones value before building a vector.  */
	  if (VECTOR_BOOLEAN_TYPE_P (type))
	    {
	      tree true_val = build_all_ones_cst (TREE_TYPE (type));
	      tree false_val = build_zero_cst (TREE_TYPE (type));

	      if (CONSTANT_CLASS_P (val))
		val = integer_zerop (val) ? false_val : true_val;
	      else
		{
		  new_temp = make_ssa_name (TREE_TYPE (type));
		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
						   val, true_val, false_val);
		  vect_init_vector_1 (stmt, init_stmt, gsi);
		  val = new_temp;
		}
	    }
	  else if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
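
/* Illustrative use (names invented): vectorizing "x + 3" with a V4SI
   vectype, a call vect_init_vector (stmt, 3, v4si_type, NULL) emits
   roughly

     cst_1 = { 3, 3, 3, 3 };

   on the loop preheader edge and returns the SSA name cst_1.  */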
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */

tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt =  ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
	tree vector_type;

	if (vectype)
	  vector_type = vectype;
	else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
		 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	  vector_type = build_same_sized_truth_vector_type (stmt_vectype);
	else
	  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

	gcc_assert (vector_type);
	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */

tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */

static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1),
					      OPTIMIZE_FOR_SPEED))
	    return ifn;
	}
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
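
/* Example (illustrative): for a call to sqrt, CFN_SQRT maps to IFN_SQRT;
   if direct_internal_fn_supported_p reports support for the vector type
   pair, the caller can emit the call as IFN_SQRT on vectors instead of a
   scalar library call per element.  */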
1658 /* Function vectorizable_mask_load_store.
1660 Check if STMT performs a conditional load or store that can be vectorized.
1661 If VEC_STMT is also passed, vectorize the STMT: create a vectorized
1662 stmt to replace it, put it in VEC_STMT, and insert it at GSI.
1663 Return FALSE if not a vectorizable STMT, TRUE otherwise. */
1666 vectorizable_mask_load_store (gimple
*stmt
, gimple_stmt_iterator
*gsi
,
1667 gimple
**vec_stmt
, slp_tree slp_node
)
1669 tree vec_dest
= NULL
;
1670 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
1671 stmt_vec_info prev_stmt_info
;
1672 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
1673 struct loop
*loop
= LOOP_VINFO_LOOP (loop_vinfo
);
1674 bool nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
1675 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
);
1676 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
1677 tree rhs_vectype
= NULL_TREE
;
1682 tree dataref_ptr
= NULL_TREE
;
1684 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
1688 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
1689 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
1690 int gather_scale
= 1;
1691 enum vect_def_type gather_dt
= vect_unknown_def_type
;
1695 enum vect_def_type dt
;
1697 if (slp_node
!= NULL
)
1700 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
1701 gcc_assert (ncopies
>= 1);
1703 is_store
= gimple_call_internal_fn (stmt
) == IFN_MASK_STORE
;
1704 mask
= gimple_call_arg (stmt
, 2);
1706 if (TREE_CODE (TREE_TYPE (mask
)) != BOOLEAN_TYPE
)
1709 /* FORNOW. This restriction should be relaxed. */
1710 if (nested_in_vect_loop
&& ncopies
> 1)
1712 if (dump_enabled_p ())
1713 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1714 "multiple types in nested loop.");
1718 if (!STMT_VINFO_RELEVANT_P (stmt_info
))
1721 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
1725 if (!STMT_VINFO_DATA_REF (stmt_info
))
1728 elem_type
= TREE_TYPE (vectype
);
1730 if (STMT_VINFO_GROUPED_ACCESS (stmt_info
))
1733 if (STMT_VINFO_STRIDED_P (stmt_info
))
1736 if (TREE_CODE (mask
) != SSA_NAME
)
1739 if (!vect_is_simple_use (mask
, loop_vinfo
, &def_stmt
, &dt
, &mask_vectype
))
1743 mask_vectype
= get_mask_type_for_scalar_type (TREE_TYPE (vectype
));
1745 if (!mask_vectype
|| !VECTOR_BOOLEAN_TYPE_P (mask_vectype
))
1750 tree rhs
= gimple_call_arg (stmt
, 3);
1751 if (!vect_is_simple_use (rhs
, loop_vinfo
, &def_stmt
, &dt
, &rhs_vectype
))
1755 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1758 gather_decl
= vect_check_gather_scatter (stmt
, loop_vinfo
, &gather_base
,
1759 &gather_off
, &gather_scale
);
1760 gcc_assert (gather_decl
);
1761 if (!vect_is_simple_use (gather_off
, loop_vinfo
, &def_stmt
, &gather_dt
,
1762 &gather_off_vectype
))
1764 if (dump_enabled_p ())
1765 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1766 "gather index use not simple.");
1770 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1772 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist
))));
1773 if (TREE_CODE (masktype
) == INTEGER_TYPE
)
1775 if (dump_enabled_p ())
1776 dump_printf_loc (MSG_MISSED_OPTIMIZATION
, vect_location
,
1777 "masked gather with integer mask not supported.");
1781 else if (tree_int_cst_compare (nested_in_vect_loop
1782 ? STMT_VINFO_DR_STEP (stmt_info
)
1783 : DR_STEP (dr
), size_zero_node
) <= 0)
1785 else if (!VECTOR_MODE_P (TYPE_MODE (vectype
))
1786 || !can_vec_mask_load_store_p (TYPE_MODE (vectype
),
1787 TYPE_MODE (mask_vectype
),
1790 && !useless_type_conversion_p (vectype
, rhs_vectype
)))
1793 if (!vec_stmt
) /* transformation not required. */
1795 STMT_VINFO_TYPE (stmt_info
) = call_vec_info_type
;
1797 vect_model_store_cost (stmt_info
, ncopies
, false, dt
,
1800 vect_model_load_cost (stmt_info
, ncopies
, false, NULL
, NULL
, NULL
);
1806 if (STMT_VINFO_GATHER_SCATTER_P (stmt_info
))
1808 tree vec_oprnd0
= NULL_TREE
, op
;
1809 tree arglist
= TYPE_ARG_TYPES (TREE_TYPE (gather_decl
));
1810 tree rettype
, srctype
, ptrtype
, idxtype
, masktype
, scaletype
;
1811 tree ptr
, vec_mask
= NULL_TREE
, mask_op
= NULL_TREE
, var
, scale
;
1812 tree perm_mask
= NULL_TREE
, prev_res
= NULL_TREE
;
1813 tree mask_perm_mask
= NULL_TREE
;
1814 edge pe
= loop_preheader_edge (loop
);
1817 enum { NARROW
, NONE
, WIDEN
} modifier
;
1818 int gather_off_nunits
= TYPE_VECTOR_SUBPARTS (gather_off_vectype
);
1820 rettype
= TREE_TYPE (TREE_TYPE (gather_decl
));
1821 srctype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1822 ptrtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1823 idxtype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1824 masktype
= TREE_VALUE (arglist
); arglist
= TREE_CHAIN (arglist
);
1825 scaletype
= TREE_VALUE (arglist
);
1826 gcc_checking_assert (types_compatible_p (srctype
, rettype
)
1827 && types_compatible_p (srctype
, masktype
));
1829 if (nunits
== gather_off_nunits
)
1831 else if (nunits
== gather_off_nunits
/ 2)
1833 unsigned char *sel
= XALLOCAVEC (unsigned char, gather_off_nunits
);
1836 for (i
= 0; i
< gather_off_nunits
; ++i
)
1837 sel
[i
] = i
| nunits
;
1839 perm_mask
= vect_gen_perm_mask_checked (gather_off_vectype
, sel
);
1841 else if (nunits
== gather_off_nunits
* 2)
1843 unsigned char *sel
= XALLOCAVEC (unsigned char, nunits
);
1846 for (i
= 0; i
< nunits
; ++i
)
1847 sel
[i
] = i
< gather_off_nunits
1848 ? i
: i
+ nunits
- gather_off_nunits
;
1850 perm_mask
= vect_gen_perm_mask_checked (vectype
, sel
);
1852 for (i
= 0; i
< nunits
; ++i
)
1853 sel
[i
] = i
| gather_off_nunits
;
1854 mask_perm_mask
= vect_gen_perm_mask_checked (masktype
, sel
);
1859 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1861 ptr
= fold_convert (ptrtype
, gather_base
);
1862 if (!is_gimple_min_invariant (ptr
))
1864 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1865 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1866 gcc_assert (!new_bb
);
1869 scale
= build_int_cst (scaletype
, gather_scale
);
1871 prev_stmt_info
= NULL
;
1872 for (j
= 0; j
< ncopies
; ++j
)
1874 if (modifier
== WIDEN
&& (j
& 1))
1875 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1876 perm_mask
, stmt
, gsi
);
1879 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1882 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1884 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1886 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1887 == TYPE_VECTOR_SUBPARTS (idxtype
));
1888 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
1889 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1891 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1892 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1896 if (mask_perm_mask
&& (j
& 1))
1897 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1898 mask_perm_mask
, stmt
, gsi
);
1902 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1905 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1906 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1910 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1912 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1913 == TYPE_VECTOR_SUBPARTS (masktype
));
1914 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
1915 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1917 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1918 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1924 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1927 if (!useless_type_conversion_p (vectype
, rettype
))
1929 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1930 == TYPE_VECTOR_SUBPARTS (rettype
));
1931 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
1932 gimple_call_set_lhs (new_stmt
, op
);
1933 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1934 var
= make_ssa_name (vec_dest
);
1935 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1936 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1940 var
= make_ssa_name (vec_dest
, new_stmt
);
1941 gimple_call_set_lhs (new_stmt
, var
);
1944 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1946 if (modifier
== NARROW
)
1953 var
= permute_vec_elements (prev_res
, var
,
1954 perm_mask
, stmt
, gsi
);
1955 new_stmt
= SSA_NAME_DEF_STMT (var
);
1958 if (prev_stmt_info
== NULL
)
1959 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1961 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
1962 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
1965 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1967 if (STMT_VINFO_RELATED_STMT (stmt_info
))
1969 stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
1970 stmt_info
= vinfo_for_stmt (stmt
);
1972 tree lhs
= gimple_call_lhs (stmt
);
1973 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
1974 set_vinfo_for_stmt (new_stmt
, stmt_info
);
1975 set_vinfo_for_stmt (stmt
, NULL
);
1976 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
1977 gsi_replace (gsi
, new_stmt
, true);
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      LOOP_VINFO_HAS_MASK_STORE (loop_vinfo) = true;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? misalign & -misalign : align);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  ptr, vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  tree ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)),
				    misalign ? misalign & -misalign : align);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  ptr, vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
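/* Illustrative example (not from the original sources): for a loop body

     if (mask[i])
       x[i] = y[i];

   if-conversion produces scalar MASK_LOAD/MASK_STORE calls, and the
   function above expands them into vector IFN_MASK_LOAD/IFN_MASK_STORE
   calls, resetting the scalar MASK_LOAD lhs to zero so DCE can delete
   it.  */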
/* Return true if vector types VECTYPE_IN and VECTYPE_OUT have
   integer elements and if we can narrow VECTYPE_IN to VECTYPE_OUT
   in a single step.  On success, store the binary pack code in
   *CONVERT_CODE.  */

static bool
simple_integer_narrowing (tree vectype_out, tree vectype_in,
			  tree_code *convert_code)
{
  if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
      || !INTEGRAL_TYPE_P (TREE_TYPE (vectype_in)))
    return false;

  tree_code code;
  int multi_step_cvt = 0;
  auto_vec <tree, 8> interm_types;
  if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
					&code, &multi_step_cvt,
					&interm_types)
      || multi_step_cvt)
    return false;

  *convert_code = code;
  return true;
}
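/* Illustrative example (not from the original sources): narrowing two
   vector(4) long inputs into one vector(8) int result needs a single

     res = VEC_PACK_TRUNC_EXPR <tmp0, tmp1>;

   so this function succeeds with *CONVERT_CODE = VEC_PACK_TRUNC_EXPR,
   while long -> short would need an intermediate step
   (multi_step_cvt != 0) and is rejected.  */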
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }
  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;

  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  tree_code convert_code = ERROR_MARK;
  if (cfn != CFN_LAST
      && (modifier == NONE
	  || (modifier == NARROW
	      && simple_integer_narrowing (vectype_out, vectype_in,
					   &convert_code))))
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW && ifn == IFN_LAST)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      if (ifn != IFN_LAST && modifier == NARROW && !slp_node)
	add_stmt_cost (stmt_info->vinfo->target_cost_data, ncopies / 2,
		       vec_promote_demote, stmt_info, 0, vect_body);

      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  if (modifier == NONE || ifn != IFN_LAST)
    {
      tree prev_res = NULL_TREE;
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  if (modifier == NARROW)
		    {
		      tree half_res = make_ssa_name (vectype_in);
		      new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		      gimple_call_set_lhs (new_stmt, half_res);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      if ((i & 1) == 0)
			{
			  prev_res = half_res;
			  continue;
			}
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, convert_code,
						      prev_res, half_res);
		    }
		  else
		    {
		      if (ifn != IFN_LAST)
			new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		      else
			new_stmt = gimple_build_call_vec (fndecl, vargs);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else if (modifier == NARROW)
	    {
	      tree half_res = make_ssa_name (vectype_in);
	      new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	      gimple_call_set_lhs (new_stmt, half_res);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if ((j & 1) == 0)
		{
		  prev_res = half_res;
		  continue;
		}
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, convert_code,
					      prev_res, half_res);
	    }
	  else
	    {
	      if (ifn != IFN_LAST)
		new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	      else
		new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == (modifier == NARROW ? 1 : 0))
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else if (modifier == NARROW)
    {
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  if (ifn != IFN_LAST)
		    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
    }
  else
    /* No current target implements this case.  */
    return false;

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
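/* Illustrative example (not from the original sources, builtin name
   hypothetical): for

     x = sqrt (a);

   in a loop vectorized with V2DF, the code above emits ncopies calls

     vect_x = vsqrt (vect_a);

   where vsqrt stands for whatever decl
   targetm.vectorize.builtin_vectorized_function returned, or an
   internal function call when one is available.  */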
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};

/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   *ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    if (!linear_step)
	      linear_step = 1;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
    }
}
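/* Illustrative example (not from the original sources): in a simd loop

     _1 = GOMP_SIMD_LANE (simduid.0_5(D));
     _2 = _1 * 8;
     p_3 = &array + _2;

   p_3 is linear within a simd lane, so the walk above records
   op = &array and linear_step = 8 and sets simd_lane_linear.  */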
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }

  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}

      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}

	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber),
					   gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
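/* Illustrative example (not from the original sources; the clone name is
   only a sample of the vector-ABI mangling): for

     #pragma omp declare simd
     int f (int a, int b);

   a call x = f (a[i], b[i]) in a vectorized loop becomes a call to a
   simd clone such as

     vect_x = _ZGVbN4vv_f (vect_a, vect_b);

   with BIT_FIELD_REFs or CONSTRUCTORs inserted above whenever the
   clone's simdlen differs from the loop's vectorization factor.  */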
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */

static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
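/* Illustrative use (not from the original sources): a V8HI widening
   multiply is emitted as the two halves

     lo = VEC_WIDEN_MULT_LO_EXPR <vect_a, vect_b>;
     hi = VEC_WIDEN_MULT_HI_EXPR <vect_a, vect_b>;

   each half produced by one call to this function, with the LO and HI
   codes passed as CODE.  */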
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
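/* Illustrative trace (not from the original sources): with
   MULTI_STEP_CVT == 1 the recursion above collects four defs

     { d0, d1, d2, d3 }

   where d0 comes from vect_get_vec_def_for_operand and each following
   def from vect_get_vec_def_for_stmt_copy of its predecessor.  */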
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
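/* Illustrative two-step demotion (not from the original sources):
   for int -> char and four V4SI operands the first level emits

     tmp0 = VEC_PACK_TRUNC_EXPR <in0, in1>;
     tmp1 = VEC_PACK_TRUNC_EXPR <in2, in3>;

   and the recursive call packs tmp0 and tmp1 into the final V16QI
   vector.  */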
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */

static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
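/* Illustrative step (not from the original sources): promoting one
   V8HI operand vector for a widening conversion yields

     lo = [vec_unpack_lo_expr] vop0;
     hi = [vec_unpack_hi_expr] vop0;

   and *VEC_OPRNDS0 is replaced by the doubled list { lo, hi, ... }
   consumed by the next conversion step.  */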
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && ! vec_stmt)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && (TYPE_PRECISION (lhs_type)
	       != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && (TYPE_PRECISION (rhs_type)
		  != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "can't convert between boolean and non "
			   "boolean vectors ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);

  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from those types to the final one.
     We create vector destinations for the intermediate types (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);
  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op)))
      /* Conversion between boolean types of different sizes is
	 a simple assignment in case their vectypes are same
	 boolean vectors.  */
      && (!VECTOR_BOOLEAN_TYPE_P (vectype)
	  || !VECTOR_BOOLEAN_TYPE_P (vectype_in)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision "
			 "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }
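  /* For example, with VF == nunits the scalar copy  a_1 = b_2;  simply
     becomes a single vector copy below, with a VIEW_CONVERT_EXPR wrapped
     around the rhs when a same-size conversion is involved.  */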
  /** Transform.  **/
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */

bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
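/* Note that, like vectorizable_shift below, this helper prefers the
   vector/scalar shift optab and falls back to the vector/vector form,
   so e.g. a target providing only vector/vector shifts still succeeds.  */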
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
		    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "operand mode requires invariant argument.\n");
      return false;
    }
  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "unusable type for last operand in"
			     " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
	  && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "vector/scalar shift/rotate found.\n");
	}
      else
	{
	  optab = optab_for_tree_code (code, vectype, optab_vector);
	  if (optab
	      && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
	    {
	      scalar_shift_arg = false;

	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vector/vector shift/rotate found.\n");

	      /* Unlike the other binary operators, shifts/rotates have
		 the rhs being int, instead of the same type as the lhs,
		 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
	      if (dt[1] == vect_constant_def)
		op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
		      if (dump_enabled_p ())
			dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
					 "unusable type for last operand in"
					 " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
	    }
	}
    }

  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (vf < vect_min_worthwhile_factor (code)
	      && !vec_stmt))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (scalar_shift_arg)
	    {
	      /* Vector shl and shr insn patterns can be defined with scalar
		 operand 2 (shift operand).  In this case, use constant or loop
		 invariant op1 directly, without extending it to vector mode
		 first.  */
	      optab_op2_mode = insn_data[icode].operand[2].mode;
	      if (!VECTOR_MODE_P (optab_op2_mode))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_NOTE, vect_location,
				     "operand 1 using scalar mode.\n");
		  vec_oprnd1 = op1;
		  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
		  vec_oprnds1.quick_push (vec_oprnd1);
		  if (slp_node)
		    {
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.
			 TODO: Allow different constants for different vector
			 stmts generated for an SLP instance.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
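	  /* On such targets (e.g. vector shift patterns taking a scalar or
	     immediate count in operand 2) OP1 is used directly and never
	     widened into a vector; the same scalar entry is pushed once
	     per vector stmt above.  */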
	  /* vec_oprnd1 is available if operand 1 should be of a scalar-type
	     (a special case for certain kind of vector shifts); otherwise,
	     operand 1 should be of a vector type (the usual case).  */
	  if (vec_oprnd1)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	}
      else
	vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = vec_oprnds1[i];
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "num. args = %d (not unary/binary/ternary op).\n",
			 op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
	  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
      if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
	{
	  if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not supported operation on bool value.\n");
	      return false;
	    }
	  vectype = vectype_out;
	}
      else
	vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
			     TREE_TYPE (op0));
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }
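  /* can_mult_highpart_p also accepts targets without a direct highpart
     multiply, e.g. when it can be synthesized from widening multiplies
     plus a permute.  */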
  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
	return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  if (op_type == ternary_op)
	    {
	      vec_oprnds2.create (1);
	      vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
								    stmt));
	    }
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	{
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
	{
	  DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
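/* E.g. if an array declaration is only aligned to its element type but
   the vectorizer assumed full vector alignment for accesses to it, the
   declaration's alignment is increased here so the assumption holds.  */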
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
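/* E.g. for V4SI this builds the permutation selector {3, 2, 1, 0},
   provided can_vec_perm_p says the target can reverse that mode.  */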
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  gimple *new_stmt;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !vec_stmt)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &rhs_vectype))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }

  if (rhs_vectype && !useless_type_conversion_p (vectype, rhs_vectype))
    return false;

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
	tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
			      ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
			      size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}
      if (negative)
	{
	  gcc_assert (!grouped_store);
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (dt != vect_constant_def
	      && dt != vect_external_def
	      && !perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported.\n");
	      return false;
	    }
	}
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
	  && !PURE_SLP_STMT (stmt_info)
	  && !STMT_VINFO_STRIDED_P (stmt_info))
	{
	  if (vect_store_lanes_supported (vectype, group_size))
	    store_lanes_p = true;
	  else if (!vect_grouped_store_supported (vectype, group_size))
	    return false;
	}

      if (STMT_VINFO_STRIDED_P (stmt_info)
	  && (slp || PURE_SLP_STMT (stmt_info))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group store\n");
	  return false;
	}

      if (first_stmt == stmt)
	{
	  /* STMT is the leader of the group. Check the operands of all the
	     stmts of the group.  */
	  next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
	  while (next_stmt)
	    {
	      gcc_assert (gimple_assign_single_p (next_stmt));
	      op = gimple_assign_rhs1 (next_stmt);
	      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
		{
		  if (dump_enabled_p ())
		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				     "use not simple.\n");
		  return false;
		}
	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	    }
	}
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
						&scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
			       &scatter_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "scatter index use not simple.");
	  return false;
	}
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
			       NULL, NULL, NULL);
      return true;
    }

  /** Transform.  **/

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
	modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < (unsigned int) nunits; ++i)
	    sel[i] = i | scatter_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();
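      /* When the offset vector has twice as many elements as the data
	 vector (the WIDEN case above), the mask re-selects the high
	 offset elements for the odd-numbered copies generated below.  */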
      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (scatter_off, stmt);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
					      stmt, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
			  == TYPE_VECTOR_SUBPARTS (srctype));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }

  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
	{
	  grouped_store = false;
	  /* VEC_NUM is the number of vect stmts to be created for this
	     group.  */
	  vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
	  first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  op = gimple_assign_rhs1 (first_stmt);
	}
      else
	/* VEC_NUM is the number of vect stmts to be created for this
	   group.  */
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform store. ncopies = %d\n", ncopies);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
	= fold_build_pointer_plus
	    (unshare_expr (DR_BASE_ADDRESS (first_dr)),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
	 */

      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
	{
	  nstores = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (elem_type, group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_size = 1;
	}

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep),
					   ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt, incr, gsi);
	      running_off = newoff;
	    }
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from gimple_assign_rhs1(stmt),
		 and first_stmt == stmt.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
					 slp_node, -1);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      gcc_assert (gimple_assign_single_p (next_stmt));
		      op = gimple_assign_rhs1 (next_stmt);
		      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    {
		      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
		      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
		    }
		}

	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  newref = build2 (MEM_REF, ltype,
				   running_off, alias_off);

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  vect_finish_stmt_generation (stmt, assign, gsi);

		  newoff = copy_ssa_name (running_off, NULL);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			  = *vec_stmt = assign;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
		      prev_stmt_info = vinfo_for_stmt (assign);
		    }
		}
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
      return true;
    }

  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (slp)
	    {
	      /* Get vectorized arguments for SLP_NODE.  */
	      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
				 NULL, slp_node, -1);

	      vec_oprnd = vec_oprnds[0];
	    }
	  else
	    {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      next_stmt = first_stmt;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		     there is no interleaving, GROUP_SIZE is 1, and only one
		     iteration of the loop will be executed.  */
		  gcc_assert (next_stmt
			      && gimple_assign_single_p (next_stmt));
		  op = gimple_assign_rhs1 (next_stmt);

		  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
		}
	    }

	  /* We should have caught mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p);
	  gcc_assert (bb_vinfo || !inv_p);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vect_is_simple_use (op, vinfo, &def_stmt, &dt);
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset,
				 TYPE_SIZE_UNIT (aggr_type));
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   TYPE_SIZE_UNIT (aggr_type));
	}

      if (store_lanes_p)
	{
	  tree vec_array;

	  /* Combine all the vectors into an array.  */
	  vec_array = create_vector_array (vectype, vec_num);
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
	    }

	  /* Emit:
	       MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
	  gimple_call_set_lhs (new_stmt, data_ref);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	}
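	  /* On targets with store-lanes instructions (e.g. AArch64 st2/st4)
	     this single internal call stores all VEC_NUM vectors with the
	     interleaving done by the instruction itself.  */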
      else
	{
	  new_stmt = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
					&result_chain);
	    }

	  next_stmt = first_stmt;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
				      dataref_ptr,
				      dataref_offset
				      ? dataref_offset
				      : build_int_cst (reference_alias_ptr_type
						       (DR_REF (first_dr)), 0));
	      align = TYPE_ALIGN_UNIT (vectype);
	      if (aligned_access_p (first_dr))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr) == -1)
		{
		  if (DR_VECT_AUX (first_dr)->base_element_aligned)
		    align = TYPE_ALIGN_UNIT (elem_type);
		  else
		    align = get_object_alignment (DR_REF (first_dr))
			    / BITS_PER_UNIT;
		  misalign = 0;
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  align * BITS_PER_UNIT);
		}
	      else
		{
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  TYPE_ALIGN (elem_type));
		  misalign = DR_MISALIGNMENT (first_dr);
		}
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (negative
		  && dt != vect_constant_def
		  && dt != vect_external_def)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest
		    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
						   vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (slp)
		continue;

	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      if (!next_stmt)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */

tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
		    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
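/* E.g. for V4SI and SEL = {1, 0, 3, 2} this returns the VECTOR_CST
   { 1, 0, 3, 2 } of the corresponding integer mask vector type.  */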
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */

static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */

static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
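/* vectorizable_load below uses this when an invariant load inside the
   loop can be replaced by a load hoisted to the preheader.  */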
/* vectorizable_load.

   Check if STMT reads a non-scalar data-ref (array/pointer/structure)
   that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node, slp_instance slp_node_instance)
{
  tree scalar_dest;
  tree vec_dest = NULL;
  tree data_ref = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
  bool nested_in_vect_loop = false;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  tree new_temp;
  machine_mode mode;
  gimple *new_stmt = NULL;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int i, j, group_size = -1, group_gap_adj;
  tree msq = NULL_TREE, lsq;
  tree offset = NULL_TREE;
  tree byte_offset = NULL_TREE;
  tree realignment_token = NULL_TREE;
  gphi *phi = NULL;
  vec<tree> dr_chain = vNULL;
  bool grouped_load = false;
  bool load_lanes_p = false;
  gimple *first_stmt;
  gimple *first_stmt_for_drptr = NULL;
  bool inv_p;
  bool negative = false;
  bool compute_in_loop = false;
  struct loop *at_loop;
  int vec_num;
  bool slp = (slp_node != NULL);
  bool slp_perm = false;
  enum tree_code code;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  int vf;
  tree aggr_type;
  gimple *def_stmt;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;
  vec_info *vinfo = stmt_info->vinfo;
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable load?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (code != ARRAY_REF
      && code != BIT_FIELD_REF
      && code != INDIRECT_REF
      && code != COMPONENT_REF
      && code != IMAGPART_EXPR
      && code != REALPART_EXPR
      && code != MEM_REF
      && TREE_CODE_CLASS (code) != tcc_declaration)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    {
      loop = LOOP_VINFO_LOOP (loop_vinfo);
      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
      vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    }
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
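
  /* Worked instance of the computation above (assumed numbers, for
     illustration only): with a vectorization factor of 8 and a four
     element vectype (nunits == 4) the non-SLP path computes
     ncopies = 8 / 4 = 2, i.e. two vector load stmts are generated per
     scalar load.  */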
  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	  > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "cannot perform implicit CSE when unrolling "
			 "with negative dependence distance\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Aligned load, but unsupported type.\n");
      return false;
    }
  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop
		  && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

      /* If this is single-element interleaving with an element distance
	 that leaves unused vector loads around punt - we at least create
	 very sub-optimal code in that case (and blow up memory).  */
      bool force_peeling = false;
      if (first_stmt == stmt
	  && !GROUP_NEXT_ELEMENT (stmt_info))
	{
	  if (GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "single-element interleaving not supported "
				 "for not adjacent vector loads\n");
	      return false;
	    }

	  /* Single-element interleaving requires peeling for gaps.  */
	  force_peeling = true;
	}

      /* If there is a gap in the end of the group or the group size cannot
	 be made a multiple of the vector element count then we access excess
	 elements in the last iteration and thus need to peel that off.  */
      if (loop_vinfo
	  && ! STMT_VINFO_STRIDED_P (stmt_info)
	  && (force_peeling
	      || GROUP_GAP (vinfo_for_stmt (first_stmt)) != 0
	      || (!slp && vf % GROUP_SIZE (vinfo_for_stmt (first_stmt)) != 0)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "Data access with gaps requires scalar "
			     "epilogue loop\n");
	  if (loop->inner)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "Peeling for outer loop is not supported\n");
	      return false;
	    }

	  LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = true;
	}

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
	slp_perm = true;

      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* ??? The following is overly pessimistic (as well as the loop
	 case above) in the case we can statically determine the excess
	 elements loaded are within the bounds of a decl that is accessed.
	 Likewise for BB vectorizations using masked loads is a possibility.  */
      if (bb_vinfo && slp_perm && group_size % nunits != 0)
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "BB vectorization with gaps at the end of a load "
			   "is not supported\n");
	  return false;
	}

      if (!slp
	  && !PURE_SLP_STMT (stmt_info)
	  && !STMT_VINFO_STRIDED_P (stmt_info))
	{
	  if (vect_load_lanes_supported (vectype, group_size))
	    load_lanes_p = true;
	  else if (!vect_grouped_load_supported (vectype, group_size))
	    return false;
	}

      /* Invalidate assumptions made by dependence analysis when vectorization
	 on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
	  && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
	  && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	      > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "cannot perform implicit CSE when performing "
			     "group loads with negative dependence distance\n");
	  return false;
	}

      /* Similarly when the stmt is a load that is both part of a SLP
	 instance and a loop vectorized stmt via the same-dr mechanism
	 we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
	  && (STMT_SLP_TYPE (stmt_info)
	      != STMT_SLP_TYPE (vinfo_for_stmt
				  (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "conflicting SLP types for CSEd load\n");
	  return false;
	}
    }
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
					       &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
			       &gather_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "gather index use not simple.\n");
	  return false;
	}
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      if ((grouped_load
	   && (slp || PURE_SLP_STMT (stmt_info)))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group load\n");
	  return false;
	}
    }
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
				       ? STMT_VINFO_DR_STEP (stmt_info)
				       : DR_STEP (dr),
				       size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}

      if (negative)
	{
	  if (grouped_load)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step for group load not supported"
				 "\n");
	      return false;
	    }
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (!perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported."
				 "\n");
	      return false;
	    }
	}
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
			      NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE;
      tree prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
	modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < gather_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
	}
      else if (nunits == gather_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < nunits; ++i)
	    sel[i] = i < gather_off_nunits
		     ? i : i + nunits - gather_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional gather loads,
	 so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
	{
	  mask = build_int_cst (TREE_TYPE (masktype), -1);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = -1;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
	  mask = build_real (TREE_TYPE (masktype), r);
	  mask = build_vector_from_val (masktype, mask);
	  mask = vect_init_vector (stmt, mask, masktype, NULL);
	}
      else
	gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
	merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
	{
	  REAL_VALUE_TYPE r;
	  long tmp[6];
	  for (j = 0; j < 6; ++j)
	    tmp[j] = 0;
	  real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
	  merge = build_real (TREE_TYPE (rettype), r);
	}
      else
	gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);
      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (modifier == WIDEN && (j & 1))
	    op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
				       perm_mask, stmt, gsi);
	  else if (j == 0)
	    op = vec_oprnd0
	      = vect_get_vec_def_for_operand (gather_off, stmt);
	  else
	    op = vec_oprnd0
	      = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

	  if (!useless_type_conversion_p (vectype, rettype))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
			  == TYPE_VECTOR_SUBPARTS (rettype));
	      op = vect_get_new_ssa_name (rettype, vect_simple_var);
	      gimple_call_set_lhs (new_stmt, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      var = make_ssa_name (vec_dest);
	      op = build1 (VIEW_CONVERT_EXPR, vectype, op);
	      new_stmt
		= gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	    }
	  else
	    {
	      var = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, var);
	    }

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (modifier == NARROW)
	    {
	      if ((j & 1) == 0)
		{
		  prev_res = var;
		  continue;
		}
	      var = permute_vec_elements (prev_res, var,
					  perm_mask, stmt, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (var);
	    }

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
	first_dr = STMT_VINFO_DATA_REF
	    (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
	first_dr = dr;

      stride_base
	= fold_build_pointer_plus
	    (DR_BASE_ADDRESS (first_dr),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (DR_OFFSET (first_dr)),
			 convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
	 stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     ... = array[i];

	 we generate a new induction variable and new accesses to
	 form a new vector (or vectors, depending on ncopies):

	   for (j = 0; ; j += VF*stride)
	     tmp1 = array[j];
	     tmp2 = array[j + stride];
	     ...
	     vectemp = {tmp1, tmp2, ...}
	     ...  */
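
      /* Assumed-numbers instance of the scheme above (illustrative
	 only): with VF == 4 and a runtime stride s, the IV built below
	 advances by 4*s each vector iteration and the four scalar
	 loads at offsets 0, s, 2*s and 3*s fill one vector per copy.  */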
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
			    build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
					  &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)),
				 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
	{
	  nloads = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (TREE_TYPE (vectype), group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    {
	      ncopies = (group_size * vf + nunits - 1) / nunits;
	      dr_chain.create (ncopies);
	    }
	  else
	    ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	}
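
      /* Assumed-numbers instance (illustrative only): for an SLP group
	 of size 2 with an eight element vectype (nunits == 8) the code
	 above picks nloads = 8 / 2 = 4 loads of a two-element ltype, so
	 one vector stmt is assembled from four two-element pieces.  */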
      for (j = 0; j < ncopies; j++)
	{
	  tree vec_inv;

	  if (nloads > 1)
	    {
	      vec_alloc (v, nloads);
	      for (i = 0; i < nloads; i++)
		{
		  tree newref, newoff;

		  newref = build2 (MEM_REF, ltype, running_off, alias_off);
		  newref = force_gimple_operand_gsi (gsi, newref, true,
						     NULL_TREE, true,
						     GSI_SAME_STMT);
		  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
		  newoff = copy_ssa_name (running_off);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		}

	      vec_inv = build_constructor (vectype, v);
	      new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
	      new_stmt = SSA_NAME_DEF_STMT (new_temp);
	    }
	  else
	    {
	      new_stmt = gimple_build_assign (make_ssa_name (ltype),
					      build2 (MEM_REF, ltype,
						      running_off, alias_off));
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      tree newoff = copy_ssa_name (running_off);
	      gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
						  running_off, stride_step);
	      vect_finish_stmt_generation (stmt, incr, gsi);

	      running_off = newoff;
	    }

	  if (slp)
	    {
	      if (slp_perm)
		dr_chain.quick_push (gimple_assign_lhs (new_stmt));
	      else
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      if (slp_perm)
	vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
				      slp_node_instance, false);
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For SLP vectorization we directly vectorize a subchain
	 without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
	first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
      /* For BB vectorization always use the first stmt to base
	 the data ref pointer on.  */
      if (bb_vinfo)
	first_stmt_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
	  /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
	     ??? But we can only do so if there is exactly one
	     as we have no way to get at the rest.  Leave the CSE
	     opportunity alone.
	     ??? With the group load eventually participating
	     in multiple different permutations (having multiple
	     slp nodes which refer to the same group) the CSE
	     is even wrong code.  See PR56270.  */
	  && !slp)
	{
	  *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	  return true;
	}
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      group_gap_adj = 0;

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
	{
	  grouped_load = false;
	  /* For SLP permutation support we need to load the whole group,
	     not only the number of vector stmts the permutation result
	     fits in.  */
	  if (slp_perm)
	    vec_num = (group_size * vf + nunits - 1) / nunits;
	  else
	    vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_gap_adj = vf * group_size - nunits * vec_num;
	}
      else
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

     S1:  x2 = &base + 2
     S2:  x0 = &base
     S3:  x1 = &base + 1
     S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

     VS1: vx0 = &base
     VS2: vx1 = &base + vec_size*1
     VS3: vx3 = &base + vec_size*2
     VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

     VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
     VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
       ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */

  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
	 p = p + indx * vectype_size;
	 vec_dest = *(p);
	 indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
	 p2 = p2 + indx * vectype_size
	 lsq = *(floor(p2))
	 vec_dest = realign_load (msq, lsq, realignment_token)
	 indx = indx + 1;
	 msq = lsq;
       }  */
  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
	  % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
				    alignment_support_scheme, NULL_TREE,
				    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
	{
	  phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
	  byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
				    size_one_node);
	}
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
	{
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr)))
	      && (alignment_support_scheme == dr_aligned
		  || alignment_support_scheme == dr_unaligned_supported))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
						(DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else if (first_stmt_for_drptr
		   && first_stmt != first_stmt_for_drptr)
	    {
	      dataref_ptr
		= vect_create_data_ref_ptr (first_stmt_for_drptr, aggr_type,
					    at_loop, offset, &dummy, gsi,
					    &ptr_incr, simd_lane_access_p,
					    &inv_p, byte_offset);
	      /* Adjust the pointer by the difference to first_stmt.  */
	      data_reference_p ptrdr
		= STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt_for_drptr));
	      tree diff = fold_convert (sizetype,
					size_binop (MINUS_EXPR,
						    DR_INIT (first_dr),
						    DR_INIT (ptrdr)));
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					     stmt, diff);
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p,
					  byte_offset);
	}
      else if (dataref_offset)
	dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
					  TYPE_SIZE_UNIT (aggr_type));
      else
	dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
				       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
	dr_chain.create (vec_num);
      if (load_lanes_p)
	{
	  tree vec_array;

	  vec_array = create_vector_array (vectype, vec_num);

	  /* Emit:
	       VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
	  gimple_call_set_lhs (new_stmt, vec_array);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  /* Extract each vector into an SSA_NAME.  */
	  for (i = 0; i < vec_num; i++)
	    {
	      new_temp = read_vector_array (stmt, gsi, scalar_dest,
					    vec_array, i);
	      dr_chain.quick_push (new_temp);
	    }

	  /* Record the mapping between SSA_NAMEs and statements.  */
	  vect_record_grouped_load_vectors (stmt, dr_chain);
	}
      else
	{
	  for (i = 0; i < vec_num; i++)
	    {
	      if (i > 0)
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      /* 2. Create the vector-load in the loop.  */
	      switch (alignment_support_scheme)
		{
		case dr_aligned:
		case dr_unaligned_supported:
		  {
		    unsigned int align, misalign;

		    data_ref
		      = fold_build2 (MEM_REF, vectype, dataref_ptr,
				     dataref_offset
				     ? dataref_offset
				     : build_int_cst (reference_alias_ptr_type
						      (DR_REF (first_dr)), 0));
		    align = TYPE_ALIGN_UNIT (vectype);
		    if (alignment_support_scheme == dr_aligned)
		      {
			gcc_assert (aligned_access_p (first_dr));
			misalign = 0;
		      }
		    else if (DR_MISALIGNMENT (first_dr) == -1)
		      {
			if (DR_VECT_AUX (first_dr)->base_element_aligned)
			  align = TYPE_ALIGN_UNIT (elem_type);
			else
			  align = (get_object_alignment (DR_REF (first_dr))
				   / BITS_PER_UNIT);
			misalign = 0;
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						align * BITS_PER_UNIT);
		      }
		    else
		      {
			TREE_TYPE (data_ref)
			  = build_aligned_type (TREE_TYPE (data_ref),
						TYPE_ALIGN (elem_type));
			misalign = DR_MISALIGNMENT (first_dr);
		      }
		    if (dataref_offset == NULL_TREE
			&& TREE_CODE (dataref_ptr) == SSA_NAME)
		      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
					      align, misalign);
		    break;
		  }
		case dr_explicit_realign:
		  {
		    tree ptr, bump;

		    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

		    if (compute_in_loop)
		      msq = vect_setup_realignment (first_stmt, gsi,
						    &realignment_token,
						    dr_explicit_realign,
						    dataref_ptr, NULL);

		    if (TREE_CODE (dataref_ptr) == SSA_NAME)
		      ptr = copy_ssa_name (dataref_ptr);
		    else
		      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
		    new_stmt = gimple_build_assign
				 (ptr, BIT_AND_EXPR, dataref_ptr,
				  build_int_cst
				    (TREE_TYPE (dataref_ptr),
				     -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    vec_dest = vect_create_destination_var (scalar_dest,
							    vectype);
		    new_stmt = gimple_build_assign (vec_dest, data_ref);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_assign_set_lhs (new_stmt, new_temp);
		    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    msq = new_temp;

		    bump = size_binop (MULT_EXPR, vs,
				       TYPE_SIZE_UNIT (elem_type));
		    bump = size_binop (MINUS_EXPR, bump, size_one_node);
		    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
		    new_stmt = gimple_build_assign
				 (NULL_TREE, BIT_AND_EXPR, ptr,
				  build_int_cst
				    (TREE_TYPE (ptr),
				     -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		    ptr = copy_ssa_name (ptr, new_stmt);
		    gimple_assign_set_lhs (new_stmt, ptr);
		    vect_finish_stmt_generation (stmt, new_stmt, gsi);
		    data_ref
		      = build2 (MEM_REF, vectype, ptr,
				build_int_cst (reference_alias_ptr_type
						 (DR_REF (first_dr)), 0));
		    break;
		  }
		case dr_explicit_realign_optimized:
		  if (TREE_CODE (dataref_ptr) == SSA_NAME)
		    new_temp = copy_ssa_name (dataref_ptr);
		  else
		    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
		  new_stmt = gimple_build_assign
			       (new_temp, BIT_AND_EXPR, dataref_ptr,
				build_int_cst
				  (TREE_TYPE (dataref_ptr),
				   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  data_ref
		    = build2 (MEM_REF, vectype, new_temp,
			      build_int_cst (reference_alias_ptr_type
					       (DR_REF (first_dr)), 0));
		  break;
		default:
		  gcc_unreachable ();
		}
	      vec_dest = vect_create_destination_var (scalar_dest, vectype);
	      new_stmt = gimple_build_assign (vec_dest, data_ref);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_assign_set_lhs (new_stmt, new_temp);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      /* 3. Handle explicit realignment if necessary/supported.
		 Create in loop:
		   vec_dest = realign_load (msq, lsq, realignment_token)  */
	      if (alignment_support_scheme == dr_explicit_realign_optimized
		  || alignment_support_scheme == dr_explicit_realign)
		{
		  lsq = gimple_assign_lhs (new_stmt);
		  if (!realignment_token)
		    realignment_token = dataref_ptr;
		  vec_dest = vect_create_destination_var (scalar_dest,
							  vectype);
		  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
						  msq, lsq, realignment_token);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);

		  if (alignment_support_scheme == dr_explicit_realign_optimized)
		    {
		      gcc_assert (phi);
		      if (i == vec_num - 1 && j == ncopies - 1)
			add_phi_arg (phi, lsq,
				     loop_latch_edge (containing_loop),
				     UNKNOWN_LOCATION);
		      msq = lsq;
		    }
		}
	      /* 4. Handle invariant-load.  */
	      if (inv_p && !bb_vinfo)
		{
		  gcc_assert (!grouped_load);
		  /* If we have versioned for aliasing or the loop doesn't
		     have any data dependencies that would preclude this,
		     then we are sure this is a loop invariant load and
		     thus we can insert it on the preheader edge.  */
		  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
		      && !nested_in_vect_loop
		      && hoist_defs_of_uses (stmt, loop))
		    {
		      if (dump_enabled_p ())
			{
			  dump_printf_loc (MSG_NOTE, vect_location,
					   "hoisting out of the vectorized "
					   "loop: ");
			  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
			}
		      tree tem = copy_ssa_name (scalar_dest);
		      gsi_insert_on_edge_immediate
			(loop_preheader_edge (loop),
			 gimple_build_assign (tem,
					      unshare_expr
					        (gimple_assign_rhs1 (stmt))));
		      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
		      new_stmt = SSA_NAME_DEF_STMT (new_temp);
		      set_vinfo_for_stmt (new_stmt,
					  new_stmt_vec_info (new_stmt, vinfo));
		    }
		  else
		    {
		      gimple_stmt_iterator gsi2 = *gsi;
		      gsi_next (&gsi2);
		      new_temp = vect_init_vector (stmt, scalar_dest,
						   vectype, &gsi2);
		      new_stmt = SSA_NAME_DEF_STMT (new_temp);
		    }
		}

	      if (negative)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  new_temp = permute_vec_elements (new_temp, new_temp,
						   perm_mask, stmt, gsi);
		  new_stmt = SSA_NAME_DEF_STMT (new_temp);
		}

	      /* Collect vector loads and later create their permutation in
		 vect_transform_grouped_load ().  */
	      if (grouped_load || slp_perm)
		dr_chain.quick_push (new_temp);

	      /* Store vector loads in the corresponding SLP_NODE.  */
	      if (slp && !slp_perm)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	    }
	  /* Bump the vector pointer to account for a gap or for excess
	     elements loaded for a permuted SLP load.  */
	  if (group_gap_adj != 0)
	    {
	      bool ovf;
	      tree bump
		= wide_int_to_tree (sizetype,
				    wi::smul (TYPE_SIZE_UNIT (elem_type),
					      group_gap_adj, &ovf));
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					     stmt, bump);
	    }
	}

      if (slp && !slp_perm)
	continue;

      if (slp_perm)
	{
	  if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
					     slp_node_instance, false))
	    {
	      dr_chain.release ();
	      return false;
	    }
	}
      else
	{
	  if (grouped_load)
	    {
	      if (!load_lanes_p)
		vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
	      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
	    }
	  else
	    {
	      if (j == 0)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
      dr_chain.release ();
    }

  return true;
}
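
/* A minimal before/after sketch of the simplest (dr_aligned,
   non-grouped) path above; the GIMPLE is illustrative and the SSA
   names are made up:

     before:                     after (for one copy):
       x_1 = a[i_2];               vect_x_3 = MEM[(vector(4) int *) ptr_4];

   The scalar stmt is left in place at this point; STMT_VINFO_VEC_STMT
   records the vector stmt so users of x_1 can find vect_x_3.  */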
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vec_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
			       &dt, comp_vectype)
	  || !*comp_vectype
	  || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
	return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
	return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
	   && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
	return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
	   && TREE_CODE (rhs) != FIXED_CST)
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
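
/* Example of an accepted condition (hypothetical names, illustrative
   only): for COND == "a_5 < b_6" with both operands SSA_NAMEs of
   matching vector subparts, *COMP_VECTYPE becomes the vectype of the
   operands; a bare boolean SSA_NAME mask with a vector boolean type is
   accepted directly on the mask path above.  */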
/* vectorizable_condition.

   Check if STMT is a conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in
   the else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, tree reduc_def, int reduc_index,
			slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
	return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
	  && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	       && reduc_def))
	return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "value used after loop.\n");
	  return false;
	}
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */
  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt,
			   &vectype1))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt,
			   &vectype2))
    return false;

  if (vectype1 && !useless_type_conversion_p (vectype, vectype1))
    return false;

  if (vectype2 && !useless_type_conversion_p (vectype, vectype2))
    return false;

  masked = !COMPARISON_CLASS_P (cond_expr);
  vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);

  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 4> ops;
	      auto_vec<vec<tree>, 4> vec_defs;

	      if (masked)
		ops.safe_push (cond_expr);
	      else
		{
		  ops.safe_push (TREE_OPERAND (cond_expr, 0));
		  ops.safe_push (TREE_OPERAND (cond_expr, 1));
		}
	      ops.safe_push (then_clause);
	      ops.safe_push (else_clause);
	      vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
	      vec_oprnds3 = vec_defs.pop ();
	      vec_oprnds2 = vec_defs.pop ();
	      if (!masked)
		vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();

	      ops.release ();
	      vec_defs.release ();
	    }
	  else
	    {
	      gimple *gtemp;
	      if (masked)
		{
		  vec_cond_lhs
		    = vect_get_vec_def_for_operand (cond_expr, stmt,
						    comp_vectype);
		  vect_is_simple_use (cond_expr, stmt_info->vinfo,
				      &gtemp, &dts[0]);
		}
	      else
		{
		  vec_cond_lhs =
		    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
						  stmt, comp_vectype);
		  vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
				      loop_vinfo, &gtemp, &dts[0]);

		  vec_cond_rhs =
		    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
						  stmt, comp_vectype);
		  vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
				      loop_vinfo, &gtemp, &dts[1]);
		}
	      if (reduc_index == 1)
		vec_then_clause = reduc_def;
	      else
		{
		  vec_then_clause
		    = vect_get_vec_def_for_operand (then_clause, stmt);
		  vect_is_simple_use (then_clause, loop_vinfo,
				      &gtemp, &dts[2]);
		}
	      if (reduc_index == 2)
		vec_else_clause = reduc_def;
	      else
		{
		  vec_else_clause
		    = vect_get_vec_def_for_operand (else_clause, stmt);
		  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
		}
	    }
	}
      else
	{
	  vec_cond_lhs
	    = vect_get_vec_def_for_stmt_copy (dts[0],
					      vec_oprnds0.pop ());
	  if (!masked)
	    vec_cond_rhs
	      = vect_get_vec_def_for_stmt_copy (dts[1],
						vec_oprnds1.pop ());

	  vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
							    vec_oprnds2.pop ());
	  vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
							    vec_oprnds3.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_cond_lhs);
	  if (!masked)
	    vec_oprnds1.quick_push (vec_cond_rhs);
	  vec_oprnds2.quick_push (vec_then_clause);
	  vec_oprnds3.quick_push (vec_else_clause);
	}
      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
	{
	  vec_then_clause = vec_oprnds2[i];
	  vec_else_clause = vec_oprnds3[i];

	  if (masked)
	    vec_compare = vec_cond_lhs;
	  else
	    {
	      vec_cond_rhs = vec_oprnds1[i];
	      vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
				    vec_cond_lhs, vec_cond_rhs);
	    }
	  new_temp = make_ssa_name (vec_dest);
	  new_stmt = gimple_build_assign (new_temp, VEC_COND_EXPR,
					  vec_compare, vec_then_clause,
					  vec_else_clause);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
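
/* Illustrative result (made-up SSA names): the scalar COND_EXPR

     x_1 = a_2 < b_3 ? c_4 : d_5;

   becomes, on the non-masked path,

     vect_x_6 = VEC_COND_EXPR <vec_a_7 < vec_b_8, vec_c_9, vec_d_10>;

   where the embedded comparison is built over vec_cmp_type; with
   ncopies > 1 one such stmt is emitted per copy and chained via
   STMT_VINFO_RELATED_STMT.  */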
/* vectorizable_comparison.

   Check if STMT is a comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, tree reduc_def,
			 slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned nunits;
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (!vectype || !VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
	   && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
			   &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
			   &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
      if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
	return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
      return expand_vec_cmp_expr_p (vectype, mask_type);
    }
  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
	{
	  if (slp_node)
	    {
	      auto_vec<tree, 2> ops;
	      auto_vec<vec<tree>, 2> vec_defs;

	      ops.safe_push (rhs1);
	      ops.safe_push (rhs2);
	      vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
	      vec_oprnds1 = vec_defs.pop ();
	      vec_oprnds0 = vec_defs.pop ();
	    }
	  else
	    {
	      vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
	      vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
	    }
	}
      else
	{
	  vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
						     vec_oprnds0.pop ());
	  vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
						     vec_oprnds1.pop ());
	}

      if (!slp_node)
	{
	  vec_oprnds0.quick_push (vec_rhs1);
	  vec_oprnds1.quick_push (vec_rhs2);
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
	{
	  vec_rhs2 = vec_oprnds1[i];

	  new_temp = make_ssa_name (mask);
	  new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
	continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
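
/* Illustrative result (made-up SSA names): the scalar comparison

     m_1 = a_2 < b_3;

   becomes

     vect_m_4 = vec_a_5 < vec_b_6;

   producing a vector boolean (mask) of MASK_TYPE that can later feed
   VEC_COND_EXPRs or masked memory accesses.  */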
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */
  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && pattern_stmt
	  && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
	{
	  /* Analyze PATTERN_STMT instead of the original stmt.  */
	  stmt = pattern_stmt;
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
			       "==> examining pattern statement: ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

	  return true;
	}
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	   && node == NULL
	   && pattern_stmt
	   && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
	       || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "==> examining pattern statement: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
	return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
	{
	  gimple *pattern_def_stmt = gsi_stmt (si);
	  if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
	      || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
	    {
	      /* Analyze def stmt of STMT if it's a pattern stmt.  */
	      if (dump_enabled_p ())
		{
		  dump_printf_loc (MSG_NOTE, vect_location,
				   "==> examining pattern def statement: ");
		  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
		}

	      if (!vect_analyze_stmt (pattern_def_stmt,
				      need_to_vectorize, node))
		return false;
	    }
	}
    }
  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
      case vect_internal_def:
	break;

      case vect_reduction_def:
      case vect_nested_cycle:
	gcc_assert (!bb_vinfo
		    && (relevance == vect_used_in_outer
			|| relevance == vect_used_in_outer_by_reduction
			|| relevance == vect_used_by_reduction
			|| relevance == vect_unused_in_scope));
	break;

      case vect_induction_def:
      case vect_constant_def:
      case vect_external_def:
      case vect_unknown_def_type:
      default:
	gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "get vectype for scalar type:  ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
	  dump_printf (MSG_NOTE, "\n");
	}

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
	{
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			       "not SLPed: unsupported data-type ");
	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
				 scalar_type);
	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	    }
	  return false;
	}

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "vectype:  ");
	  dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
	  dump_printf (MSG_NOTE, "\n");
	}

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
		  || (is_gimple_call (stmt)
		      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "handled only by SLP analysis\n");
      return true;
    }
  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
	  || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	  || vectorizable_conversion (stmt, NULL, NULL, node)
	  || vectorizable_shift (stmt, NULL, NULL, node)
	  || vectorizable_operation (stmt, NULL, NULL, node)
	  || vectorizable_assignment (stmt, NULL, NULL, node)
	  || vectorizable_load (stmt, NULL, NULL, node, NULL)
	  || vectorizable_call (stmt, NULL, NULL, node)
	  || vectorizable_store (stmt, NULL, NULL, node)
	  || vectorizable_reduction (stmt, NULL, NULL, node)
	  || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	  || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
	ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
	      || vectorizable_conversion (stmt, NULL, NULL, node)
	      || vectorizable_shift (stmt, NULL, NULL, node)
	      || vectorizable_operation (stmt, NULL, NULL, node)
	      || vectorizable_assignment (stmt, NULL, NULL, node)
	      || vectorizable_load (stmt, NULL, NULL, node, NULL)
	      || vectorizable_call (stmt, NULL, NULL, node)
	      || vectorizable_store (stmt, NULL, NULL, node)
	      || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
	      || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: relevant stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "not vectorized: live stmt not ");
	  dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
	  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
	}

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at GSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
		     bool *grouped_store, slp_tree slp_node,
		     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gcc_assert (slp_node || !PURE_SLP_STMT (stmt_info));
  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
				slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
	{
	  /* In case of interleaving, the whole chain is vectorized when the
	     last store in the chain is reached.  Store stmts before the last
	     one are skipped, and their vec_stmt_info shouldn't be freed
	     meanwhile.  */
	  *grouped_store = true;
	  if (STMT_VINFO_VEC_STMT (stmt_info))
	    is_store = true;
	}
      else
	is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
	is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "stmt not supported.\n");
	  gcc_unreachable ();
	}
    }
  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
		&& STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
				  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
	  || STMT_VINFO_RELEVANT (stmt_info) ==
	     vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
				 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
	 (to be used when vectorizing outer-loop stmts that use the DEF of
	 STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
	scalar_dest = PHI_RESULT (stmt);
      else
	scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
	{
	  if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
	    {
	      exit_phi = USE_STMT (use_p);
	      STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
	    }
	}
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
	next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
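
/* Minimal standalone sketch (not GCC source, compiled out) of the
   deletion idiom used in vect_remove_stores: capture the chain's next
   element before the current one is freed, so the walk survives the
   removal.  The list type here is hypothetical.  */
#if 0
#include <stdlib.h>

struct chain { struct chain *next; };

static void
remove_chain (struct chain *first)
{
  struct chain *next = first;
  while (next)
    {
      struct chain *tmp = next->next;	/* Save the successor first.  */
      free (next);			/* Now the node may be freed.  */
      next = tmp;
    }
}
#endif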
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  STMT_VINFO_NUM_SLP_USES (res) = 0;

  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}
/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
	= vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
	{
	  gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
	  gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
	  gimple_set_bb (patt_stmt, NULL);
	  tree lhs = gimple_get_lhs (patt_stmt);
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    release_ssa_name (lhs);
	  if (seq)
	    {
	      gimple_stmt_iterator si;
	      for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
		{
		  gimple *seq_stmt = gsi_stmt (si);
		  gimple_set_bb (seq_stmt, NULL);
		  lhs = gimple_get_lhs (seq_stmt);
		  if (lhs && TREE_CODE (lhs) == SSA_NAME)
		    release_ssa_name (lhs);
		  free_stmt_vec_info (seq_stmt);
		}
	    }
	  free_stmt_vec_info (patt_stmt);
	}
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     lookup a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
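
/* Standalone sketch (not GCC source, compiled out) of the size
   arithmetic above: with a 16-byte vector size and 4-byte elements,
   nunits = 16 / 4 = 4 lanes (a V4SI-style type); a 1-lane result is
   rejected, mirroring the nunits <= 1 check.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned size = 16;			/* Vector size in bytes.  */
  unsigned nbytes = sizeof (int);	/* Element size, 4 on most targets.  */
  unsigned nunits = size / nbytes;	/* 4 lanes -> like V4SI.  */
  if (nunits <= 1)
    printf ("not a useful vector type\n");
  else
    printf ("%u lanes\n", nunits);	/* Prints "4 lanes".  */
  return 0;
}
#endif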
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
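
/* Usage sketch (hedged, compiled out): the first successful call
   latches current_vector_size, so later queries for other scalar types
   pick vectors of the same byte size.  */
#if 0
  /* No size latched yet: picks the target's preferred SIMD width
     and records it in current_vector_size.  */
  tree vi = get_vectype_for_scalar_type (integer_type_node);
  /* Reuses that byte size, so the float vector chosen here has the
     same total size as VI's.  */
  tree vf = get_vectype_for_scalar_type (float_type_node);
#endif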
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.
   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      *dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;
  else
    gcc_unreachable ();

  return true;
}
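
/* Usage sketch (hedged, compiled out) of the overload above: also
   retrieve the def's vector type; NULL_TREE means the caller must
   choose one itself (constants and external defs).  OP is a
   hypothetical operand.  */
#if 0
  gimple *def_stmt;
  enum vect_def_type dt;
  tree vectype;
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &vectype))
    return false;
  if (!vectype)
    /* Constant or external def: derive a type from the scalar type.  */
    vectype = get_vectype_for_scalar_type (TREE_TYPE (op));
#endif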
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like Altivec,
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have
	     the same operation.  One such example is s += a * b, where
	     elements in a and b cannot be reordered.  Here we check if the
	     vector defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple *use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;
    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }
  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode,
					    TYPE_UNSIGNED (prev_type));

      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
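
/* Standalone sketch (not GCC source, compiled out) of the two result
   orderings discussed in supportable_widening_operation for 8 widened
   results per iteration: lo/hi keeps the original order split across
   two vectors, while even/odd (Altivec-style mult_even/mult_odd)
   deinterleaves it, which is only acceptable when the results feed a
   reduction, since a sum is order-insensitive.  */
#if 0
#include <stdio.h>

int
main (void)
{
  short res[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  short lo[4], hi[4], even[4], odd[4];
  for (int i = 0; i < 4; i++)
    {
      lo[i] = res[i];		/* vect1: [res1,res2,res3,res4].  */
      hi[i] = res[i + 4];	/* vect2: [res5,res6,res7,res8].  */
      even[i] = res[2 * i];	/* vect1: [res1,res3,res5,res7].  */
      odd[i] = res[2 * i + 1];	/* vect2: [res2,res4,res6,res8].  */
    }
  /* A sum reduction gives the same answer for both layouts.  */
  int s1 = 0, s2 = 0;
  for (int i = 0; i < 4; i++)
    {
      s1 += lo[i] + hi[i];
      s2 += even[i] + odd[i];
    }
  printf ("%d %d\n", s1, s2);	/* Prints "36 36".  */
  return 0;
}
#endif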
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type, prev_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;
  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  prev_type = vectype;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }
  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      if (VECTOR_BOOLEAN_TYPE_P (prev_type))
	{
	  intermediate_type
	    = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) * 2,
				       current_vector_size);
	  if (intermediate_mode != TYPE_MODE (intermediate_type))
	    return false;
	}
      else
	intermediate_type
	  = lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return true;

      prev_mode = intermediate_mode;
      prev_type = intermediate_type;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
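
/* Standalone sketch (not GCC source, compiled out) of the step count
   used above: narrowing int->char needs one intermediate type (short),
   so MULTI_STEP_CVT is 1 and INTERM_TYPES holds { short }.  Each step
   halves the element width until the narrow type is reached.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned wide_bits = 32;	/* int.  */
  unsigned narrow_bits = 8;	/* char.  */
  int steps = 0;
  unsigned bits = wide_bits;
  while (bits / 2 > narrow_bits)
    {
      bits /= 2;		/* Intermediate: 16 bits (short).  */
      steps++;
    }
  printf ("MULTI_STEP_CVT = %d\n", steps);	/* Prints 1.  */
  return 0;
}
#endif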