/* Statement Analysis and Transformation for Vectorization
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "optabs-tree.h"
#include "insn-config.h"
#include "recog.h"		/* FIXME: for insn_data */
#include "fold-const.h"
#include "stor-layout.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "internal-fn.h"

/* For lang_hooks.types.type_for_mode.  */
#include "langhooks.h"
/* Return the vectorized type for the given statement.  */

tree
stmt_vectype (struct _stmt_vec_info *stmt_info)
{
  return STMT_VINFO_VECTYPE (stmt_info);
}
/* Return TRUE iff the given statement is in an inner loop relative to
   the loop being vectorized.  */

bool
stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
{
  gimple *stmt = STMT_VINFO_STMT (stmt_info);
  basic_block bb = gimple_bb (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop;

  if (!loop_vinfo)
    return false;

  loop = LOOP_VINFO_LOOP (loop_vinfo);

  return (bb->loop_father == loop->inner);
}
/* Record the cost of a statement, either by directly informing the
   target model or by saving it in a vector for later processing.
   Return a preliminary estimate of the statement's cost.  */
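/* When BODY_COST_VEC is non-null the cost is only queued in that vector
   and a preliminary estimate is returned; when it is null the cost is
   handed straight to the target's add_stmt_cost hook.  */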
unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  if (body_cost_vec)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      stmt_info_for_cost si = { count, kind,
				stmt_info ? STMT_VINFO_STMT (stmt_info) : NULL,
				misalign };
      body_cost_vec->safe_push (si);
      return (unsigned)
	(builtin_vectorization_cost (kind, vectype, misalign) * count);
    }
  else
    return add_stmt_cost (stmt_info->vinfo->target_cost_data,
			  count, kind, stmt_info, misalign, where);
}
/* Return a variable of type ELEM_TYPE[NELEMS].  */

static tree
create_vector_array (tree elem_type, unsigned HOST_WIDE_INT nelems)
{
  return create_tmp_var (build_array_type_nelts (elem_type, nelems),
			 "vect_array");
}
/* ARRAY is an array of vectors created by create_vector_array.
   Return an SSA_NAME for the vector in index N.  The reference
   is part of the vectorization of STMT and the vector is associated
   with scalar destination SCALAR_DEST.  */

static tree
read_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree scalar_dest,
		   tree array, unsigned HOST_WIDE_INT n)
{
  tree vect_type, vect, vect_name, array_ref;
  gimple *new_stmt;

  gcc_assert (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE);
  vect_type = TREE_TYPE (TREE_TYPE (array));
  vect = vect_create_destination_var (scalar_dest, vect_type);
  array_ref = build4 (ARRAY_REF, vect_type, array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (vect, array_ref);
  vect_name = make_ssa_name (vect, new_stmt);
  gimple_assign_set_lhs (new_stmt, vect_name);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return vect_name;
}
/* ARRAY is an array of vectors created by create_vector_array.
   Emit code to store SSA_NAME VECT in index N of the array.
   The store is part of the vectorization of STMT.  */

static void
write_vector_array (gimple *stmt, gimple_stmt_iterator *gsi, tree vect,
		    tree array, unsigned HOST_WIDE_INT n)
{
  tree array_ref;
  gimple *new_stmt;

  array_ref = build4 (ARRAY_REF, TREE_TYPE (vect), array,
		      build_int_cst (size_type_node, n),
		      NULL_TREE, NULL_TREE);

  new_stmt = gimple_build_assign (array_ref, vect);
  vect_finish_stmt_generation (stmt, new_stmt, gsi);
}
/* PTR is a pointer to an array of type TYPE.  Return a representation
   of *PTR.  The memory reference replaces those in FIRST_DR
   (and its group).  */

static tree
create_array_ref (tree type, tree ptr, struct data_reference *first_dr)
{
  tree mem_ref, alias_ptr_type;

  alias_ptr_type = reference_alias_ptr_type (DR_REF (first_dr));
  mem_ref = build2 (MEM_REF, type, ptr, build_int_cst (alias_ptr_type, 0));
  /* Arrays have the same alignment as their type.  */
  set_ptr_info_alignment (get_ptr_info (ptr), TYPE_ALIGN_UNIT (type), 0);
  return mem_ref;
}
/* Utility functions used by vect_mark_stmts_to_be_vectorized.  */

/* Function vect_mark_relevant.

   Mark STMT as "relevant for vectorization" and add it to WORKLIST.  */

static void
vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
		    enum vect_relevant relevant, bool live_p,
		    bool used_in_pattern)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  enum vect_relevant save_relevant = STMT_VINFO_RELEVANT (stmt_info);
  bool save_live_p = STMT_VINFO_LIVE_P (stmt_info);
  gimple *pattern_stmt;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "mark relevant %d, live %d.\n", relevant, live_p);

  /* If this stmt is an original stmt in a pattern, we might need to mark its
     related pattern stmt instead of the original stmt.  However, such stmts
     may have their own uses that are not in any pattern, in such cases the
     stmt itself should be marked.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      bool found = false;
      if (!used_in_pattern)
	{
	  imm_use_iterator imm_iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  tree lhs;
	  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);

	  if (is_gimple_assign (stmt))
	    lhs = gimple_assign_lhs (stmt);
	  else
	    lhs = gimple_call_lhs (stmt);

	  /* This use is out of pattern use; if LHS has other uses that are
	     pattern uses, we should mark the stmt itself, and not the pattern
	     stmt.  */
	  if (lhs && TREE_CODE (lhs) == SSA_NAME)
	    FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	      {
		if (is_gimple_debug (USE_STMT (use_p)))
		  continue;
		use_stmt = USE_STMT (use_p);

		if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
		  continue;

		if (vinfo_for_stmt (use_stmt)
		    && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
		  {
		    found = true;
		    break;
		  }
	      }
	}

      if (!found)
	{
	  /* This is the last stmt in a sequence that was detected as a
	     pattern that can potentially be vectorized.  Don't mark the stmt
	     as relevant/live because it's not going to be vectorized.
	     Instead mark the pattern-stmt that replaces it.  */

	  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "last stmt in pattern. don't mark"
			     " relevant/live.\n");
	  stmt_info = vinfo_for_stmt (pattern_stmt);
	  gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
	  save_relevant = STMT_VINFO_RELEVANT (stmt_info);
	  save_live_p = STMT_VINFO_LIVE_P (stmt_info);
	  stmt = pattern_stmt;
	}
    }

  STMT_VINFO_LIVE_P (stmt_info) |= live_p;
  if (relevant > STMT_VINFO_RELEVANT (stmt_info))
    STMT_VINFO_RELEVANT (stmt_info) = relevant;

  if (STMT_VINFO_RELEVANT (stmt_info) == save_relevant
      && STMT_VINFO_LIVE_P (stmt_info) == save_live_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "already marked relevant/live.\n");
      return;
    }

  worklist->safe_push (stmt);
}
/* Function vect_stmt_relevant_p.

   Return true if STMT in loop that is represented by LOOP_VINFO is
   "relevant for vectorization".

   A stmt is considered "relevant for vectorization" if:
   - it has uses outside the loop.
   - it has vdefs (it alters memory).
   - it is a control stmt in the loop (except for the exit condition).

   CHECKME: what other side effects would the vectorizer allow?  */
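/* For example, in

	for (i = 0; i < N; i++)
	  {
	    a[i] = b[i] + 1;	<-- relevant: has a vdef (alters memory)
	    s += b[i];
	  }
	use (s);		<-- the def of s inside the loop is "live"

   both loop body stmts end up marked, whereas the increment of i does
   not (loop control is handled separately).  */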
static bool
vect_stmt_relevant_p (gimple *stmt, loop_vec_info loop_vinfo,
		      enum vect_relevant *relevant, bool *live_p)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  ssa_op_iter op_iter;
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  def_operand_p def_p;

  *relevant = vect_unused_in_scope;
  *live_p = false;

  /* cond stmt other than loop exit cond.  */
  if (is_ctrl_stmt (stmt)
      && STMT_VINFO_TYPE (vinfo_for_stmt (stmt))
	 != loop_exit_ctrl_vec_info_type)
    *relevant = vect_used_in_scope;

  /* changing memory.  */
  if (gimple_code (stmt) != GIMPLE_PHI)
    if (gimple_vdef (stmt)
	&& !gimple_clobber_p (stmt))
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vec_stmt_relevant_p: stmt has vdefs.\n");
	*relevant = vect_used_in_scope;
      }

  /* uses outside the loop.  */
  FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
	{
	  basic_block bb = gimple_bb (USE_STMT (use_p));
	  if (!flow_bb_inside_loop_p (loop, bb))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_NOTE, vect_location,
				 "vec_stmt_relevant_p: used out of loop.\n");

	      if (is_gimple_debug (USE_STMT (use_p)))
		continue;

	      /* We expect all such uses to be in the loop exit phis
		 (because of loop closed form)   */
	      gcc_assert (gimple_code (USE_STMT (use_p)) == GIMPLE_PHI);
	      gcc_assert (bb == single_exit (loop)->dest);

	      *live_p = true;
	    }
	}
    }

  return (*live_p || *relevant);
}
/* Function exist_non_indexing_operands_for_use_p

   USE is one of the uses attached to STMT.  Check if USE is
   used in STMT for anything other than indexing an array.  */

static bool
exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
{
  tree operand;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  /* USE corresponds to some operand in STMT.  If there is no data
     reference in STMT, then any operand that corresponds to USE
     is not indexing an array.  */
  if (!STMT_VINFO_DATA_REF (stmt_info))
    return true;

  /* STMT has a data_ref.  FORNOW this means that it is of one of
     the following forms:
     -1- ARRAY_REF = var
     -2- var = ARRAY_REF
     (This should have been verified in analyze_data_refs).

     'var' in the second case corresponds to a def, not a use,
     so USE cannot correspond to any operands that are not used
     for array indexing.

     Therefore, all we need to check is if STMT falls into the
     first case, and whether var corresponds to USE.  */

  if (!gimple_assign_copy_p (stmt))
    {
      if (is_gimple_call (stmt)
	  && gimple_call_internal_p (stmt))
	switch (gimple_call_internal_fn (stmt))
	  {
	  case IFN_MASK_STORE:
	    operand = gimple_call_arg (stmt, 3);
	    if (operand == use)
	      return true;
	    /* FALLTHRU */
	  case IFN_MASK_LOAD:
	    operand = gimple_call_arg (stmt, 2);
	    if (operand == use)
	      return true;
	    break;
	  default:
	    break;
	  }
      return false;
    }

  if (TREE_CODE (gimple_assign_lhs (stmt)) == SSA_NAME)
    return false;
  operand = gimple_assign_rhs1 (stmt);
  if (TREE_CODE (operand) != SSA_NAME)
    return false;

  if (operand == use)
    return true;

  return false;
}
/* Function process_use.

   Inputs:
   - a USE in STMT in a loop represented by LOOP_VINFO
   - LIVE_P, RELEVANT - enum values to be set in the STMT_VINFO of the stmt
     that defined USE.  This is done by calling mark_relevant and passing it
     the WORKLIST (to add DEF_STMT to the WORKLIST in case it is relevant).
   - FORCE is true if exist_non_indexing_operands_for_use_p check shouldn't
     be performed.

   Outputs:
   Generally, LIVE_P and RELEVANT are used to define the liveness and
   relevance info of the DEF_STMT of this USE:
       STMT_VINFO_LIVE_P (DEF_STMT_info) <-- live_p
       STMT_VINFO_RELEVANT (DEF_STMT_info) <-- relevant
   Exceptions:
   - case 1: If USE is used only for address computations (e.g. array indexing),
   which does not need to be directly vectorized, then the liveness/relevance
   of the respective DEF_STMT is left unchanged.
   - case 2: If STMT is a reduction phi and DEF_STMT is a reduction stmt, we
   skip DEF_STMT because it has already been processed.
   - case 3: If DEF_STMT and STMT are in different nests, then "relevant" will
   be modified accordingly.

   Return true if everything is as expected.  Return false otherwise.  */
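/* E.g. in "x_1 = a[i_2]" the use of i_2 only feeds the address
   computation of the data-ref, so by case 1 below the def stmt of i_2 is
   left unmarked when this use is processed.  */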
static bool
process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
	     enum vect_relevant relevant, vec<gimple *> *worklist,
	     bool force)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  stmt_vec_info dstmt_vinfo;
  basic_block bb, def_bb;
  gimple *def_stmt;
  enum vect_def_type dt;

  /* case 1: we are only interested in uses that need to be vectorized.  Uses
     that are used for address computation are not considered relevant.  */
  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
    return true;

  if (!vect_is_simple_use (use, loop_vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not vectorized: unsupported use in stmt.\n");
      return false;
    }

  if (!def_stmt || gimple_nop_p (def_stmt))
    return true;

  def_bb = gimple_bb (def_stmt);
  if (!flow_bb_inside_loop_p (loop, def_bb))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
      return true;
    }

  /* case 2: A reduction phi (STMT) defined by a reduction stmt (DEF_STMT).
     DEF_STMT must have already been processed, because this should be the
     only way that STMT, which is a reduction-phi, was put in the worklist,
     as there should be no other uses for DEF_STMT in the loop.  So we just
     check that everything is as expected, and we are done.  */
  dstmt_vinfo = vinfo_for_stmt (def_stmt);
  bb = gimple_bb (stmt);
  if (gimple_code (stmt) == GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
      && gimple_code (def_stmt) != GIMPLE_PHI
      && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
      && bb->loop_father == def_bb->loop_father)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "reduc-stmt defining reduc-phi in the same nest.\n");
      if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
	dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
      gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
      gcc_assert (STMT_VINFO_LIVE_P (dstmt_vinfo)
		  || STMT_VINFO_RELEVANT (dstmt_vinfo) > vect_unused_in_scope);
      return true;
    }

  /* case 3a: outer-loop stmt defining an inner-loop stmt:
	outer-loop-header-bb:
		d = def_stmt
	inner-loop:
		stmt # use (d)
	outer-loop-tail-bb:
		...		  */
  if (flow_loop_nested_p (def_bb->loop_father, bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "outer-loop def-stmt defining inner-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_nested_cycle) ?
		      vect_used_in_scope : vect_unused_in_scope;
	  break;

	case vect_used_in_outer_by_reduction:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_by_reduction;
	  break;

	case vect_used_in_outer:
	  gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) != vect_reduction_def);
	  relevant = vect_used_in_scope;
	  break;

	case vect_used_in_scope:
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* case 3b: inner-loop stmt defining an outer-loop stmt:
	outer-loop-header-bb:
		...
	inner-loop:
		d = def_stmt
	outer-loop-tail-bb (or outer-loop-exit-bb in double reduction):
		stmt # use (d)		*/
  else if (flow_loop_nested_p (bb->loop_father, def_bb->loop_father))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "inner-loop def-stmt defining outer-loop stmt.\n");

      switch (relevant)
	{
	case vect_unused_in_scope:
	  relevant = (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	    || STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_double_reduction_def) ?
		      vect_used_in_outer_by_reduction : vect_unused_in_scope;
	  break;

	case vect_used_by_reduction:
	  relevant = vect_used_in_outer_by_reduction;
	  break;

	case vect_used_in_scope:
	  relevant = vect_used_in_outer;
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  vect_mark_relevant (worklist, def_stmt, relevant, live_p,
		      is_pattern_stmt_p (stmt_vinfo));
  return true;
}
/* Function vect_mark_stmts_to_be_vectorized.

   Not all stmts in the loop need to be vectorized.  For example:

     for i...
       for j...
   1.    T0 = i + j
   2.	 T1 = a[T0]

   3.    j = j + 1

   Stmt 1 and 3 do not need to be vectorized, because loop control and
   addressing of vectorized data-refs are handled differently.

   This pass detects such stmts.  */
bool
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
  unsigned int nbbs = loop->num_nodes;
  gimple_stmt_iterator si;
  gimple *stmt;
  unsigned int i;
  stmt_vec_info stmt_vinfo;
  basic_block bb;
  gimple *phi;
  bool live_p;
  enum vect_relevant relevant, tmp_relevant;
  enum vect_def_type def_type;

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "=== vect_mark_stmts_to_be_vectorized ===\n");

  auto_vec<gimple *, 64> worklist;

  /* 1. Init worklist.  */
  for (i = 0; i < nbbs; i++)
    {
      bb = bbs[i];
      for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  phi = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	    }

	  if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, phi, relevant, live_p, false);
	}
      for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
	{
	  stmt = gsi_stmt (si);
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
	      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	    }

	  if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
	    vect_mark_relevant (&worklist, stmt, relevant, live_p, false);
	}
    }

  /* 2. Process_worklist */
  while (worklist.length () > 0)
    {
      use_operand_p use_p;
      ssa_op_iter iter;

      stmt = worklist.pop ();
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
	}

      /* Examine the USEs of STMT.  For each USE, mark the stmt that defines it
	 (DEF_STMT) as relevant/irrelevant and live/dead according to the
	 liveness and relevance properties of STMT.  */
      stmt_vinfo = vinfo_for_stmt (stmt);
      relevant = STMT_VINFO_RELEVANT (stmt_vinfo);
      live_p = STMT_VINFO_LIVE_P (stmt_vinfo);

      /* Generally, the liveness and relevance properties of STMT are
	 propagated as is to the DEF_STMTs of its USEs:
	  live_p <-- STMT_VINFO_LIVE_P (STMT_VINFO)
	  relevant <-- STMT_VINFO_RELEVANT (STMT_VINFO)

	 One exception is when STMT has been identified as defining a reduction
	 variable; in this case we set the liveness/relevance as follows:
	   live_p = false
	   relevant = vect_used_by_reduction
	 This is because we distinguish between two kinds of relevant stmts -
	 those that are used by a reduction computation, and those that are
	 (also) used by a regular computation.  This allows us later on to
	 identify stmts that are used solely by a reduction, and therefore the
	 order of the results that they produce does not have to be kept.  */

      def_type = STMT_VINFO_DEF_TYPE (stmt_vinfo);
      tmp_relevant = relevant;
      switch (def_type)
	{
	case vect_reduction_def:
	  switch (tmp_relevant)
	    {
	    case vect_unused_in_scope:
	      relevant = vect_used_by_reduction;
	      break;

	    case vect_used_by_reduction:
	      if (gimple_code (stmt) == GIMPLE_PHI)
		break;
	      /* fall through */

	    default:
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of reduction.\n");
	      return false;
	    }

	  live_p = false;
	  break;

	case vect_nested_cycle:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_in_outer_by_reduction
	      && tmp_relevant != vect_used_in_outer)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of nested cycle.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	case vect_double_reduction_def:
	  if (tmp_relevant != vect_unused_in_scope
	      && tmp_relevant != vect_used_by_reduction)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "unsupported use of double reduction.\n");

	      return false;
	    }

	  live_p = false;
	  break;

	default:
	  break;
	}

      if (is_pattern_stmt_p (stmt_vinfo))
	{
	  /* Pattern statements are not inserted into the code, so
	     FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
	     have to scan the RHS or function arguments instead.  */
	  if (is_gimple_assign (stmt))
	    {
	      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
	      tree op = gimple_assign_rhs1 (stmt);

	      i = 1;
	      if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
		{
		  if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
				    live_p, relevant, &worklist, false)
		      || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
				       live_p, relevant, &worklist, false))
		    return false;
		  i = 2;
		}
	      for (; i < gimple_num_ops (stmt); i++)
		{
		  op = gimple_op (stmt, i);
		  if (TREE_CODE (op) == SSA_NAME
		      && !process_use (stmt, op, loop_vinfo, live_p, relevant,
				       &worklist, false))
		    return false;
		}
	    }
	  else if (is_gimple_call (stmt))
	    {
	      for (i = 0; i < gimple_call_num_args (stmt); i++)
		{
		  tree arg = gimple_call_arg (stmt, i);
		  if (!process_use (stmt, arg, loop_vinfo, live_p, relevant,
				    &worklist, false))
		    return false;
		}
	    }
	}
      else
	FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	  {
	    tree op = USE_FROM_PTR (use_p);
	    if (!process_use (stmt, op, loop_vinfo, live_p, relevant,
			      &worklist, false))
	      return false;
	  }

      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
	{
	  tree off;
	  tree decl = vect_check_gather_scatter (stmt, loop_vinfo, NULL, &off, NULL);
	  gcc_assert (decl);
	  if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
			    &worklist, true))
	    return false;
	}
    } /* while worklist */

  return true;
}
/* Function vect_model_simple_cost.

   Models cost for simple operations, i.e. those that only emit ncopies of a
   single op.  Right now, this does not account for multiple insns that could
   be generated for the single vector op.  We will handle that shortly.  */
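/* For instance, a vector add with one invariant operand and NCOPIES == 2
   records one prologue vector_stmt (materializing the invariant vector)
   and two vect_body vector_stmts.  */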
static void
vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
			enum vect_def_type *dt,
			stmt_vector_for_cost *prologue_cost_vec,
			stmt_vector_for_cost *body_cost_vec)
{
  int i;
  int inside_cost = 0, prologue_cost = 0;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += record_stmt_cost (prologue_cost_vec, 1, vector_stmt,
					 stmt_info, 0, vect_prologue);

  /* Pass the inside-of-loop statements to the target-specific cost model.  */
  inside_cost = record_stmt_cost (body_cost_vec, ncopies, vector_stmt,
				  stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_simple_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Model cost for type demotion and promotion operations.  PWR is normally
   zero for single-step promotions and demotions.  It will be one if
   two-step promotion/demotion is required, and so on.  Each additional
   step doubles the number of instructions required.  */
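/* E.g. a two-step promotion (PWR == 1) of a type_promotion stmt costs
   vect_pow2 (1) + vect_pow2 (2) = 2 + 4 = 6 vec_promote_demote stmts in
   the loop below; the matching demotion costs 1 + 2 = 3.  */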
static void
vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
				    enum vect_def_type *dt, int pwr)
{
  int i, tmp;
  int inside_cost = 0, prologue_cost = 0;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  void *target_cost_data;

  /* The SLP costs were already calculated during SLP tree build.  */
  if (PURE_SLP_STMT (stmt_info))
    return;

  if (loop_vinfo)
    target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
  else
    target_cost_data = BB_VINFO_TARGET_COST_DATA (bb_vinfo);

  for (i = 0; i < pwr + 1; i++)
    {
      tmp = (STMT_VINFO_TYPE (stmt_info) == type_promotion_vec_info_type) ?
	(i + 1) : i;
      inside_cost += add_stmt_cost (target_cost_data, vect_pow2 (tmp),
				    vec_promote_demote, stmt_info, 0,
				    vect_body);
    }

  /* FORNOW: Assuming maximum 2 args per stmts.  */
  for (i = 0; i < 2; i++)
    if (dt[i] == vect_constant_def || dt[i] == vect_external_def)
      prologue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
				      stmt_info, 0, vect_prologue);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_promotion_demotion_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_cost_group_size

   For grouped load or store, return the group_size only if it is the first
   load or store of a group, else return 1.  This ensures that group size is
   only returned once per group.  */

static int
vect_cost_group_size (stmt_vec_info stmt_info)
{
  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

  if (first_stmt == STMT_VINFO_STMT (stmt_info))
    return GROUP_SIZE (stmt_info);

  return 1;
}
/* Function vect_model_store_cost

   Models cost for stores.  In the case of grouped accesses, one access
   has the overhead of the grouped access attributed to it.  */

void
vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
		       bool store_lanes_p, enum vect_def_type dt,
		       slp_tree slp_node,
		       stmt_vector_for_cost *prologue_cost_vec,
		       stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  unsigned int inside_cost = 0, prologue_cost = 0;
  struct data_reference *first_dr;
  gimple *first_stmt;

  if (dt == vect_constant_def || dt == vect_external_def)
    prologue_cost += record_stmt_cost (prologue_cost_vec, 1, scalar_to_vec,
				       stmt_info, 0, vect_prologue);

  /* Grouped access?  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      if (slp_node)
	{
	  first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  group_size = 1;
	}
      else
	{
	  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
	  group_size = vect_cost_group_size (stmt_info);
	}

      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = STMT_VINFO_DATA_REF (stmt_info);
    }

  /* We assume that the cost of a single store-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
     access is instead being provided by a permute-and-store operation,
     include the cost of the permutes.  */
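  /* E.g. with GROUP_SIZE == 4 and NCOPIES == 2, the permute scheme below
     costs 2 * ceil_log2 (4) * 4 == 16 vec_perm stmts on top of the stores
     themselves.  */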
  if (!store_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses high and low interleave or shuffle operations for each
	 needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_store_cost: strided group_size = %d .\n",
			 group_size);
    }

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Costs of the stores.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    /* N scalar stores plus extracting the elements.  */
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     scalar_store, stmt_info, 0, vect_body);
  else
    vect_get_store_cost (first_dr, ncopies, &inside_cost, body_cost_vec);

  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec,
				     ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				     vec_to_scalar, stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_store_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_store_cost (struct data_reference *dr, int ncopies,
		     unsigned int *inside_cost,
		     stmt_vector_for_cost *body_cost_vec)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vector_store, stmt_info, 0,
					  vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: aligned.\n");
	break;
      }

    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned store.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_store, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_store_cost: unaligned supported by "
			   "hardware.\n");
	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_store_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_model_load_cost

   Models cost for loads.  In the case of grouped accesses, the last access
   has the overhead of the grouped access attributed to it.  Since unaligned
   accesses are supported for loads, we also account for the costs of the
   access scheme chosen.  */

void
vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
		      bool load_lanes_p, slp_tree slp_node,
		      stmt_vector_for_cost *prologue_cost_vec,
		      stmt_vector_for_cost *body_cost_vec)
{
  int group_size;
  gimple *first_stmt;
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr;
  unsigned int inside_cost = 0, prologue_cost = 0;

  /* Grouped accesses?  */
  first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && first_stmt && !slp_node)
    {
      group_size = vect_cost_group_size (stmt_info);
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
    }
  /* Not a grouped access.  */
  else
    {
      group_size = 1;
      first_dr = dr;
    }

  /* We assume that the cost of a single load-lanes instruction is
     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
     access is instead being provided by a load-and-permute operation,
     include the cost of the permutes.  */
  if (!load_lanes_p && group_size > 1
      && !STMT_VINFO_STRIDED_P (stmt_info))
    {
      /* Uses even and odd extract operations or shuffle operations
	 for each needed permute.  */
      int nstmts = ncopies * ceil_log2 (group_size) * group_size;
      inside_cost = record_stmt_cost (body_cost_vec, nstmts, vec_perm,
				      stmt_info, 0, vect_body);

      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "vect_model_load_cost: strided group_size = %d .\n",
			 group_size);
    }

  /* The loads themselves.  */
  if (STMT_VINFO_STRIDED_P (stmt_info)
      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      /* N scalar loads plus gathering them into a vector.  */
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      inside_cost += record_stmt_cost (body_cost_vec,
				       ncopies * TYPE_VECTOR_SUBPARTS (vectype),
				       scalar_load, stmt_info, 0, vect_body);
    }
  else
    vect_get_load_cost (first_dr, ncopies,
			((!STMT_VINFO_GROUPED_ACCESS (stmt_info))
			 || group_size > 1 || slp_node),
			&inside_cost, &prologue_cost,
			prologue_cost_vec, body_cost_vec, true);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_construct,
				     stmt_info, 0, vect_body);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "vect_model_load_cost: inside_cost = %d, "
		     "prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Calculate cost of DR's memory access.  */
void
vect_get_load_cost (struct data_reference *dr, int ncopies,
		    bool add_realign_cost, unsigned int *inside_cost,
		    unsigned int *prologue_cost,
		    stmt_vector_for_cost *prologue_cost_vec,
		    stmt_vector_for_cost *body_cost_vec,
		    bool record_prologue_costs)
{
  int alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
  gimple *stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  switch (alignment_support_scheme)
    {
    case dr_aligned:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: aligned.\n");

	break;
      }
    case dr_unaligned_supported:
      {
	/* Here, we assign an additional cost for the unaligned load.  */
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  unaligned_load, stmt_info,
					  DR_MISALIGNMENT (dr), vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned supported by "
			   "hardware.\n");

	break;
      }
    case dr_explicit_realign:
      {
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies * 2,
					  vector_load, stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies,
					  vec_perm, stmt_info, 0, vect_body);

	/* FIXME: If the misalignment remains fixed across the iterations of
	   the containing loop, the following cost should be added to the
	   prologue costs.  */
	if (targetm.vectorize.builtin_mask_for_load)
	  *inside_cost += record_stmt_cost (body_cost_vec, 1, vector_stmt,
					    stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign\n");

	break;
      }
    case dr_explicit_realign_optimized:
      {
	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: unaligned software "
			   "pipelined.\n");

	/* Unaligned software pipeline has a load of an address, an initial
	   load, and possibly a mask operation to "prime" the loop.  However,
	   if this is an access in a group of loads, which provide grouped
	   access, then the above cost should only be considered for one
	   access in the group.  Inside the loop, there is a load op
	   and a realignment op.  */
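	/* That is, at most three prologue stmts are recorded below (the
	   address load, the initial load, and the optional mask), plus one
	   vector_load and one vec_perm per copy in the body.  */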
	if (add_realign_cost && record_prologue_costs)
	  {
	    *prologue_cost += record_stmt_cost (prologue_cost_vec, 2,
						vector_stmt, stmt_info,
						0, vect_prologue);
	    if (targetm.vectorize.builtin_mask_for_load)
	      *prologue_cost += record_stmt_cost (prologue_cost_vec, 1,
						  vector_stmt, stmt_info,
						  0, vect_prologue);
	  }

	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vector_load,
					  stmt_info, 0, vect_body);
	*inside_cost += record_stmt_cost (body_cost_vec, ncopies, vec_perm,
					  stmt_info, 0, vect_body);

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_model_load_cost: explicit realign optimized"
			   "\n");

	break;
      }

    case dr_unaligned_unsupported:
      {
	*inside_cost = VECT_MAX_COST;

	if (dump_enabled_p ())
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "vect_model_load_cost: unsupported access.\n");
	break;
      }

    default:
      gcc_unreachable ();
    }
}
/* Insert the new stmt NEW_STMT at *GSI or at the appropriate place in
   the loop preheader for the vectorized stmt STMT.  */

static void
vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
{
  if (gsi)
    vect_finish_stmt_generation (stmt, new_stmt, gsi);
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
      loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);

      if (loop_vinfo)
	{
	  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
	  basic_block new_bb;
	  edge pe;

	  if (nested_in_vect_loop_p (loop, stmt))
	    loop = loop->inner;

	  pe = loop_preheader_edge (loop);
	  new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
	  gcc_assert (!new_bb);
	}
      else
	{
	  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
	  basic_block bb;
	  gimple_stmt_iterator gsi_bb_start;

	  gcc_assert (bb_vinfo);
	  bb = BB_VINFO_BB (bb_vinfo);
	  gsi_bb_start = gsi_after_labels (bb);
	  gsi_insert_before (&gsi_bb_start, new_stmt, GSI_SAME_STMT);
	}
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "created new init_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
    }
}
/* Function vect_init_vector.

   Insert a new stmt (INIT_STMT) that initializes a new variable of type
   TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
   vector type a vector with all elements equal to VAL is created first.
   Place the initialization at BSI if it is not NULL.  Otherwise, place the
   initialization at the loop preheader.
   Return the DEF of INIT_STMT.
   It will be used in the vectorization of STMT.  */
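/* For instance, with a scalar VAL of 5 and a four-element integer vector
   TYPE, this emits cst_N = { 5, 5, 5, 5 } (in the loop preheader when
   GSI is NULL) and returns cst_N.  */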
tree
vect_init_vector (gimple *stmt, tree val, tree type, gimple_stmt_iterator *gsi)
{
  gimple *init_stmt;
  tree new_temp;

  if (TREE_CODE (type) == VECTOR_TYPE
      && TREE_CODE (TREE_TYPE (val)) != VECTOR_TYPE)
    {
      if (!types_compatible_p (TREE_TYPE (type), TREE_TYPE (val)))
	{
	  if (CONSTANT_CLASS_P (val))
	    val = fold_convert (TREE_TYPE (type), val);
	  else
	    {
	      new_temp = make_ssa_name (TREE_TYPE (type));
	      init_stmt = gimple_build_assign (new_temp, NOP_EXPR, val);
	      vect_init_vector_1 (stmt, init_stmt, gsi);
	      val = new_temp;
	    }
	}
      val = build_vector_from_val (type, val);
    }

  new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
  init_stmt = gimple_build_assign (new_temp, val);
  vect_init_vector_1 (stmt, init_stmt, gsi);
  return new_temp;
}
/* Function vect_get_vec_def_for_operand.

   OP is an operand in STMT.  This function returns a (vector) def that will be
   used in the vectorized stmt for STMT.

   In the case that OP is an SSA_NAME which is defined in the loop, then
   STMT_VINFO_VEC_STMT of the defining stmt holds the relevant def.

   In case OP is an invariant or constant, a new stmt that creates a vector def
   needs to be introduced.  VECTYPE may be used to specify a required type for
   vector invariant.  */
tree
vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
{
  tree vec_oprnd;
  gimple *vec_stmt;
  gimple *def_stmt;
  stmt_vec_info def_stmt_info = NULL;
  stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
  tree stmt_vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
  enum vect_def_type dt;
  bool is_simple_use;
  tree vector_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_get_vec_def_for_operand: ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
      dump_printf (MSG_NOTE, "\n");
    }

  is_simple_use = vect_is_simple_use (op, loop_vinfo, &def_stmt, &dt);
  gcc_assert (is_simple_use);
  if (dump_enabled_p ())
    {
      int loc_printed = 0;
      if (def_stmt)
	{
	  if (loc_printed)
	    dump_printf (MSG_NOTE, "  def_stmt =  ");
	  else
	    dump_printf_loc (MSG_NOTE, vect_location, "  def_stmt =  ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
	}
    }

  switch (dt)
    {
    /* operand is a constant or a loop invariant.  */
    case vect_constant_def:
    case vect_external_def:
      {
	if (vectype)
	  vector_type = vectype;
	else if (TREE_CODE (TREE_TYPE (op)) == BOOLEAN_TYPE
		 && VECTOR_BOOLEAN_TYPE_P (stmt_vectype))
	  vector_type = build_same_sized_truth_vector_type (stmt_vectype);
	else
	  vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));

	gcc_assert (vector_type);
	return vect_init_vector (stmt, op, vector_type, NULL);
      }

    /* operand is defined inside the loop.  */
    case vect_internal_def:
      {
	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);

	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	/* Get vectorized pattern statement.  */
	if (!vec_stmt
	    && STMT_VINFO_IN_PATTERN_P (def_stmt_info)
	    && !STMT_VINFO_RELEVANT (def_stmt_info))
	  vec_stmt = STMT_VINFO_VEC_STMT (vinfo_for_stmt (
		       STMT_VINFO_RELATED_STMT (def_stmt_info)));
	gcc_assert (vec_stmt);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else if (is_gimple_call (vec_stmt))
	  vec_oprnd = gimple_call_lhs (vec_stmt);
	else
	  vec_oprnd = gimple_assign_lhs (vec_stmt);
	return vec_oprnd;
      }

    /* operand is defined by a loop header phi - reduction  */
    case vect_reduction_def:
    case vect_double_reduction_def:
    case vect_nested_cycle:
      /* Code should use get_initial_def_for_reduction.  */
      gcc_unreachable ();

    /* operand is defined by loop-header phi - induction.  */
    case vect_induction_def:
      {
	gcc_assert (gimple_code (def_stmt) == GIMPLE_PHI);

	/* Get the def from the vectorized stmt.  */
	def_stmt_info = vinfo_for_stmt (def_stmt);
	vec_stmt = STMT_VINFO_VEC_STMT (def_stmt_info);
	if (gimple_code (vec_stmt) == GIMPLE_PHI)
	  vec_oprnd = PHI_RESULT (vec_stmt);
	else
	  vec_oprnd = gimple_get_lhs (vec_stmt);
	return vec_oprnd;
      }

    default:
      gcc_unreachable ();
    }
}
/* Function vect_get_vec_def_for_stmt_copy

   Return a vector-def for an operand.  This function is used when the
   vectorized stmt to be created (by the caller to this function) is a "copy"
   created in case the vectorized result cannot fit in one vector, and several
   copies of the vector-stmt are required.  In this case the vector-def is
   retrieved from the vector stmt recorded in the STMT_VINFO_RELATED_STMT field
   of the stmt that defines VEC_OPRND.
   DT is the type of the vector def VEC_OPRND.

   Context:
	In case the vectorization factor (VF) is bigger than the number
   of elements that can fit in a vectype (nunits), we have to generate
   more than one vector stmt to vectorize the scalar stmt.  This situation
   arises when there are multiple data-types operated upon in the loop; the
   smallest data-type determines the VF, and as a result, when vectorizing
   stmts operating on wider types we need to create 'VF/nunits' "copies" of the
   vector stmt (each computing a vector of 'nunits' results, and together
   computing 'VF' results in each iteration).  This function is called when
   vectorizing such a stmt (e.g. vectorizing S2 in the illustration below, in
   which VF=16 and nunits=4, so the number of copies required is 4):

   scalar stmt:         vectorized into:        STMT_VINFO_RELATED_STMT

   S1: x = load         VS1.0:  vx.0 = memref0      VS1.1
                        VS1.1:  vx.1 = memref1      VS1.2
                        VS1.2:  vx.2 = memref2      VS1.3
                        VS1.3:  vx.3 = memref3

   S2: z = x + ...      VSnew.0:  vz0 = vx.0 + ...  VSnew.1
                        VSnew.1:  vz1 = vx.1 + ...  VSnew.2
                        VSnew.2:  vz2 = vx.2 + ...  VSnew.3
                        VSnew.3:  vz3 = vx.3 + ...

   The vectorization of S1 is explained in vectorizable_load.
   The vectorization of S2:
	To create the first vector-stmt out of the 4 copies - VSnew.0 -
   the function 'vect_get_vec_def_for_operand' is called to
   get the relevant vector-def for each operand of S2.  For operand x it
   returns the vector-def 'vx.0'.

	To create the remaining copies of the vector-stmt (VSnew.j), this
   function is called to get the relevant vector-def for each operand.  It is
   obtained from the respective VS1.j stmt, which is recorded in the
   STMT_VINFO_RELATED_STMT field of the stmt that defines VEC_OPRND.

	For example, to obtain the vector-def 'vx.1' in order to create the
   vector stmt 'VSnew.1', this function is called with VEC_OPRND='vx.0'.
   Given 'vx0' we obtain the stmt that defines it ('VS1.0'); from the
   STMT_VINFO_RELATED_STMT field of 'VS1.0' we obtain the next copy - 'VS1.1',
   and return its def ('vx.1').
   Overall, to create the above sequence this function will be called 3 times:
	vx.1 = vect_get_vec_def_for_stmt_copy (dt, vx.0);
	vx.2 = vect_get_vec_def_for_stmt_copy (dt, vx.1);
	vx.3 = vect_get_vec_def_for_stmt_copy (dt, vx.2);  */
tree
vect_get_vec_def_for_stmt_copy (enum vect_def_type dt, tree vec_oprnd)
{
  gimple *vec_stmt_for_operand;
  stmt_vec_info def_stmt_info;

  /* Do nothing; can reuse same def.  */
  if (dt == vect_external_def || dt == vect_constant_def)
    return vec_oprnd;

  vec_stmt_for_operand = SSA_NAME_DEF_STMT (vec_oprnd);
  def_stmt_info = vinfo_for_stmt (vec_stmt_for_operand);
  gcc_assert (def_stmt_info);
  vec_stmt_for_operand = STMT_VINFO_RELATED_STMT (def_stmt_info);
  gcc_assert (vec_stmt_for_operand);
  if (gimple_code (vec_stmt_for_operand) == GIMPLE_PHI)
    vec_oprnd = PHI_RESULT (vec_stmt_for_operand);
  else
    vec_oprnd = gimple_get_lhs (vec_stmt_for_operand);
  return vec_oprnd;
}
/* Get vectorized definitions for the operands to create a copy of an original
   stmt.  See vect_get_vec_def_for_stmt_copy () for details.  */

static void
vect_get_vec_defs_for_stmt_copy (enum vect_def_type *dt,
				 vec<tree> *vec_oprnds0,
				 vec<tree> *vec_oprnds1)
{
  tree vec_oprnd = vec_oprnds0->pop ();

  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd);
  vec_oprnds0->quick_push (vec_oprnd);

  if (vec_oprnds1 && vec_oprnds1->length ())
    {
      vec_oprnd = vec_oprnds1->pop ();
      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd);
      vec_oprnds1->quick_push (vec_oprnd);
    }
}
/* Get vectorized definitions for OP0 and OP1.
   REDUC_INDEX is the index of reduction operand in case of reduction,
   and -1 otherwise.  */

void
vect_get_vec_defs (tree op0, tree op1, gimple *stmt,
		   vec<tree> *vec_oprnds0,
		   vec<tree> *vec_oprnds1,
		   slp_tree slp_node, int reduc_index)
{
  if (slp_node)
    {
      int nops = (op1 == NULL_TREE) ? 1 : 2;
      auto_vec<tree> ops (nops);
      auto_vec<vec<tree> > vec_defs (nops);

      ops.quick_push (op0);
      if (op1)
	ops.quick_push (op1);

      vect_get_slp_defs (ops, slp_node, &vec_defs, reduc_index);

      *vec_oprnds0 = vec_defs[0];
      if (op1)
	*vec_oprnds1 = vec_defs[1];
    }
  else
    {
      tree vec_oprnd;

      vec_oprnds0->create (1);
      vec_oprnd = vect_get_vec_def_for_operand (op0, stmt);
      vec_oprnds0->quick_push (vec_oprnd);

      if (op1)
	{
	  vec_oprnds1->create (1);
	  vec_oprnd = vect_get_vec_def_for_operand (op1, stmt);
	  vec_oprnds1->quick_push (vec_oprnd);
	}
    }
}
/* Function vect_finish_stmt_generation.

   Insert a new stmt.  */

void
vect_finish_stmt_generation (gimple *stmt, gimple *vec_stmt,
			     gimple_stmt_iterator *gsi)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  vec_info *vinfo = stmt_info->vinfo;

  gcc_assert (gimple_code (stmt) != GIMPLE_LABEL);

  if (!gsi_end_p (*gsi)
      && gimple_has_mem_ops (vec_stmt))
    {
      gimple *at_stmt = gsi_stmt (*gsi);
      tree vuse = gimple_vuse (at_stmt);
      if (vuse && TREE_CODE (vuse) == SSA_NAME)
	{
	  tree vdef = gimple_vdef (at_stmt);
	  gimple_set_vuse (vec_stmt, gimple_vuse (at_stmt));
	  /* If we have an SSA vuse and insert a store, update virtual
	     SSA form to avoid triggering the renamer.  Do so only
	     if we can easily see all uses - which is what almost always
	     happens with the way vectorized stmts are inserted.  */
	  if ((vdef && TREE_CODE (vdef) == SSA_NAME)
	      && ((is_gimple_assign (vec_stmt)
		   && !is_gimple_reg (gimple_assign_lhs (vec_stmt)))
		  || (is_gimple_call (vec_stmt)
		      && !(gimple_call_flags (vec_stmt)
			   & (ECF_CONST|ECF_PURE|ECF_NOVOPS)))))
	    {
	      tree new_vdef = copy_ssa_name (vuse, vec_stmt);
	      gimple_set_vdef (vec_stmt, new_vdef);
	      SET_USE (gimple_vuse_op (at_stmt), new_vdef);
	    }
	}
    }
  gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);

  set_vinfo_for_stmt (vec_stmt, new_stmt_vec_info (vec_stmt, vinfo));

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

  gimple_set_location (vec_stmt, gimple_location (stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
  int lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
}
/* We want to vectorize a call to combined function CFN with function
   decl FNDECL, using VECTYPE_OUT as the type of the output and VECTYPE_IN
   as the types of all inputs.  Check whether this is possible using
   an internal function, returning its code if so or IFN_LAST if not.  */
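/* For example, a call to the sqrt built-in can map to the internal
   function IFN_SQRT when the target provides a direct optab for the
   vector type (illustrative; the exact mapping depends on the
   internal-fn tables).  */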
static internal_fn
vectorizable_internal_function (combined_fn cfn, tree fndecl,
				tree vectype_out, tree vectype_in)
{
  internal_fn ifn;
  if (internal_fn_p (cfn))
    ifn = as_internal_fn (cfn);
  else
    ifn = associated_internal_fn (fndecl);
  if (ifn != IFN_LAST && direct_internal_fn_p (ifn))
    {
      const direct_internal_fn_info &info = direct_internal_fn (ifn);
      if (info.vectorizable)
	{
	  tree type0 = (info.type0 < 0 ? vectype_out : vectype_in);
	  tree type1 = (info.type1 < 0 ? vectype_out : vectype_in);
	  if (direct_internal_fn_supported_p (ifn, tree_pair (type0, type1)))
	    return ifn;
	}
    }
  return IFN_LAST;
}


static tree permute_vec_elements (tree, tree, tree, gimple *,
				  gimple_stmt_iterator *);
/* Function vectorizable_mask_load_store.

   Check if STMT performs a conditional load or store that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
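/* The scalar calls handled here have the form

     lhs = IFN_MASK_LOAD (ptr, align, mask);
     IFN_MASK_STORE (ptr, align, mask, rhs);

   i.e. the mask is argument 2 and, for stores, the stored value is
   argument 3, which is how the arguments are picked out below.  */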
static bool
vectorizable_mask_load_store (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  stmt_vec_info prev_stmt_info;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  bool nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree rhs_vectype = NULL_TREE;
  tree mask_vectype;
  tree elem_type;
  gimple *new_stmt;
  tree dummy;
  tree dataref_ptr = NULL_TREE;
  gimple *ptr_incr;
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);
  int ncopies;
  int i, j;
  bool inv_p;
  tree gather_base = NULL_TREE, gather_off = NULL_TREE;
  tree gather_off_vectype = NULL_TREE, gather_decl = NULL_TREE;
  int gather_scale = 1;
  enum vect_def_type gather_dt = vect_unknown_def_type;
  bool is_store;
  tree mask;
  gimple *def_stmt;
  enum vect_def_type dt;

  if (slp_node != NULL)
    return false;

  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;
  gcc_assert (ncopies >= 1);

  is_store = gimple_call_internal_fn (stmt) == IFN_MASK_STORE;
  mask = gimple_call_arg (stmt, 2);

  if (TREE_CODE (TREE_TYPE (mask)) != BOOLEAN_TYPE)
    return false;

  /* FORNOW. This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.");
      return false;
    }

  if (!STMT_VINFO_RELEVANT_P (stmt_info))
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  elem_type = TREE_TYPE (vectype);

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    return false;

  if (STMT_VINFO_STRIDED_P (stmt_info))
    return false;

  if (TREE_CODE (mask) != SSA_NAME)
    return false;

  if (!vect_is_simple_use (mask, loop_vinfo, &def_stmt, &dt, &mask_vectype))
    return false;

  if (!mask_vectype)
    mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));

  if (!mask_vectype)
    return false;

  if (is_store)
    {
      tree rhs = gimple_call_arg (stmt, 3);
      if (!vect_is_simple_use (rhs, loop_vinfo, &def_stmt, &dt, &rhs_vectype))
	return false;
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
					       &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, loop_vinfo, &def_stmt, &gather_dt,
			       &gather_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "gather index use not simple.");
	  return false;
	}

      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree masktype
	= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
      if (TREE_CODE (masktype) == INTEGER_TYPE)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "masked gather with integer mask not supported.");
	  return false;
	}
    }
  else if (tree_int_cst_compare (nested_in_vect_loop
				 ? STMT_VINFO_DR_STEP (stmt_info)
				 : DR_STEP (dr), size_zero_node) <= 0)
    return false;
  else if (!VECTOR_MODE_P (TYPE_MODE (vectype))
	   || !can_vec_mask_load_store_p (TYPE_MODE (vectype),
					  TYPE_MODE (mask_vectype),
					  !is_store)
	   || (rhs_vectype
	       && !useless_type_conversion_p (vectype, rhs_vectype)))
    return false;

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (is_store)
	vect_model_store_cost (stmt_info, ncopies, false, dt,
			       NULL, NULL, NULL);
      else
	vect_model_load_cost (stmt_info, ncopies, false, NULL, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, vec_mask = NULL_TREE, mask_op = NULL_TREE, var, scale;
      tree perm_mask = NULL_TREE, prev_res = NULL_TREE;
      tree mask_perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype)
			   && types_compatible_p (srctype, masktype));

      if (nunits == gather_off_nunits)
	modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < gather_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
	}
      else if (nunits == gather_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < nunits; ++i)
	    sel[i] = i < gather_off_nunits
		     ? i : i + nunits - gather_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  ncopies *= 2;
	  for (i = 0; i < nunits; ++i)
	    sel[i] = i | gather_off_nunits;
	  mask_perm_mask = vect_gen_perm_mask_checked (masktype, sel);
	}
      else
	gcc_unreachable ();
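      /* E.g. for V8SI data gathered with V4DI offsets, NUNITS (8) is twice
	 GATHER_OFF_NUNITS (4): the NARROW scheme above doubles NCOPIES and
	 merges each pair of gather results with PERM_MASK.  In the WIDEN
	 case a single offset vector covers two data vectors, so odd copies
	 permute the high half of the offsets into place.  */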
1878 vec_dest
= vect_create_destination_var (gimple_call_lhs (stmt
), vectype
);
1880 ptr
= fold_convert (ptrtype
, gather_base
);
1881 if (!is_gimple_min_invariant (ptr
))
1883 ptr
= force_gimple_operand (ptr
, &seq
, true, NULL_TREE
);
1884 new_bb
= gsi_insert_seq_on_edge_immediate (pe
, seq
);
1885 gcc_assert (!new_bb
);
1888 scale
= build_int_cst (scaletype
, gather_scale
);
1890 prev_stmt_info
= NULL
;
1891 for (j
= 0; j
< ncopies
; ++j
)
1893 if (modifier
== WIDEN
&& (j
& 1))
1894 op
= permute_vec_elements (vec_oprnd0
, vec_oprnd0
,
1895 perm_mask
, stmt
, gsi
);
1898 = vect_get_vec_def_for_operand (gather_off
, stmt
);
1901 = vect_get_vec_def_for_stmt_copy (gather_dt
, vec_oprnd0
);
1903 if (!useless_type_conversion_p (idxtype
, TREE_TYPE (op
)))
1905 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op
))
1906 == TYPE_VECTOR_SUBPARTS (idxtype
));
1907 var
= vect_get_new_ssa_name (idxtype
, vect_simple_var
);
1908 op
= build1 (VIEW_CONVERT_EXPR
, idxtype
, op
);
1910 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1911 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1915 if (mask_perm_mask
&& (j
& 1))
1916 mask_op
= permute_vec_elements (mask_op
, mask_op
,
1917 mask_perm_mask
, stmt
, gsi
);
1921 vec_mask
= vect_get_vec_def_for_operand (mask
, stmt
);
1924 vect_is_simple_use (vec_mask
, loop_vinfo
, &def_stmt
, &dt
);
1925 vec_mask
= vect_get_vec_def_for_stmt_copy (dt
, vec_mask
);
1929 if (!useless_type_conversion_p (masktype
, TREE_TYPE (vec_mask
)))
1931 gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op
))
1932 == TYPE_VECTOR_SUBPARTS (masktype
));
1933 var
= vect_get_new_ssa_name (masktype
, vect_simple_var
);
1934 mask_op
= build1 (VIEW_CONVERT_EXPR
, masktype
, mask_op
);
1936 = gimple_build_assign (var
, VIEW_CONVERT_EXPR
, mask_op
);
1937 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1943 = gimple_build_call (gather_decl
, 5, mask_op
, ptr
, op
, mask_op
,
1946 if (!useless_type_conversion_p (vectype
, rettype
))
1948 gcc_assert (TYPE_VECTOR_SUBPARTS (vectype
)
1949 == TYPE_VECTOR_SUBPARTS (rettype
));
1950 op
= vect_get_new_ssa_name (rettype
, vect_simple_var
);
1951 gimple_call_set_lhs (new_stmt
, op
);
1952 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1953 var
= make_ssa_name (vec_dest
);
1954 op
= build1 (VIEW_CONVERT_EXPR
, vectype
, op
);
1955 new_stmt
= gimple_build_assign (var
, VIEW_CONVERT_EXPR
, op
);
1959 var
= make_ssa_name (vec_dest
, new_stmt
);
1960 gimple_call_set_lhs (new_stmt
, var
);
1963 vect_finish_stmt_generation (stmt
, new_stmt
, gsi
);
1965 if (modifier
== NARROW
)
1972 var
= permute_vec_elements (prev_res
, var
,
1973 perm_mask
, stmt
, gsi
);
1974 new_stmt
= SSA_NAME_DEF_STMT (var
);
1977 if (prev_stmt_info
== NULL
)
1978 STMT_VINFO_VEC_STMT (stmt_info
) = *vec_stmt
= new_stmt
;
1980 STMT_VINFO_RELATED_STMT (prev_stmt_info
) = new_stmt
;
1981 prev_stmt_info
= vinfo_for_stmt (new_stmt
);
1984 /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
1986 if (STMT_VINFO_RELATED_STMT (stmt_info
))
1988 stmt
= STMT_VINFO_RELATED_STMT (stmt_info
);
1989 stmt_info
= vinfo_for_stmt (stmt
);
1991 tree lhs
= gimple_call_lhs (stmt
);
1992 new_stmt
= gimple_build_assign (lhs
, build_zero_cst (TREE_TYPE (lhs
)));
1993 set_vinfo_for_stmt (new_stmt
, stmt_info
);
1994 set_vinfo_for_stmt (stmt
, NULL
);
1995 STMT_VINFO_STMT (stmt_info
) = new_stmt
;
1996 gsi_replace (gsi
, new_stmt
, true);
  else if (is_store)
    {
      tree vec_rhs = NULL_TREE, vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      tree rhs = gimple_call_arg (stmt, 3);
	      vec_rhs = vect_get_vec_def_for_operand (rhs, stmt);
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      /* We should have caught mismatched types earlier.  */
	      gcc_assert (useless_type_conversion_p (vectype,
						     TREE_TYPE (vec_rhs)));
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_rhs, loop_vinfo, &def_stmt, &dt);
	      vec_rhs = vect_get_vec_def_for_stmt_copy (dt, vec_rhs);
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask, vec_rhs);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  else
    {
      tree vec_mask = NULL_TREE;
      prev_stmt_info = NULL;
      vec_dest = vect_create_destination_var (gimple_call_lhs (stmt), vectype);
      for (i = 0; i < ncopies; i++)
	{
	  unsigned align, misalign;

	  if (i == 0)
	    {
	      vec_mask = vect_get_vec_def_for_operand (mask, stmt);
	      dataref_ptr = vect_create_data_ref_ptr (stmt, vectype, NULL,
						      NULL_TREE, &dummy, gsi,
						      &ptr_incr, false, &inv_p);
	      gcc_assert (!inv_p);
	    }
	  else
	    {
	      vect_is_simple_use (vec_mask, loop_vinfo, &def_stmt, &dt);
	      vec_mask = vect_get_vec_def_for_stmt_copy (dt, vec_mask);
	      dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					     TYPE_SIZE_UNIT (vectype));
	    }

	  align = TYPE_ALIGN_UNIT (vectype);
	  if (aligned_access_p (dr))
	    misalign = 0;
	  else if (DR_MISALIGNMENT (dr) == -1)
	    {
	      align = TYPE_ALIGN_UNIT (elem_type);
	      misalign = 0;
	    }
	  else
	    misalign = DR_MISALIGNMENT (dr);
	  set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
				  misalign);
	  new_stmt
	    = gimple_build_call_internal (IFN_MASK_LOAD, 3, dataref_ptr,
					  gimple_call_arg (stmt, 1),
					  vec_mask);
	  gimple_call_set_lhs (new_stmt, make_ssa_name (vec_dest));
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (i == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }
  if (!is_store)
    {
      /* Ensure that even with -fno-tree-dce the scalar MASK_LOAD is removed
	 from the IL.  */
      if (STMT_VINFO_RELATED_STMT (stmt_info))
	{
	  stmt = STMT_VINFO_RELATED_STMT (stmt_info);
	  stmt_info = vinfo_for_stmt (stmt);
	}
      tree lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (TREE_TYPE (lhs)));
      set_vinfo_for_stmt (new_stmt, stmt_info);
      set_vinfo_for_stmt (stmt, NULL);
      STMT_VINFO_STMT (stmt_info) = new_stmt;
      gsi_replace (gsi, new_stmt, true);
    }

  return true;
}
/* Function vectorizable_call.

   Check if GS performs a function call that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
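/* For instance, with a vectorization factor of four, a scalar call

     x = sqrtf (y);

   in the loop body becomes one vector call per copy, either an internal
   function call such as IFN_SQRT on a vector operand or a call to a
   target-provided builtin; which of the two gets used is decided below.
   (The concrete vector modes here are only illustrative.)  */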
static bool
vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi, gimple **vec_stmt,
		   slp_tree slp_node)
{
  gcall *stmt;
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (gs), prev_stmt_info;
  tree vectype_out, vectype_in;
  int nunits_in;
  int nunits_out;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree fndecl, new_temp, rhs_type;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<tree> vargs = vNULL;
  enum { NARROW, NONE, WIDEN } modifier;
  size_t i, nargs;
  tree lhs;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is GS a vectorizable call?   */
  stmt = dyn_cast <gcall *> (gs);
  if (!stmt)
    return false;

  if (gimple_call_internal_p (stmt)
      && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
	  || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    return vectorizable_mask_load_store (stmt, gsi, vec_stmt,
					 slp_node);

  if (gimple_call_lhs (stmt) == NULL_TREE
      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Process function arguments.  */
  rhs_type = NULL_TREE;
  vectype_in = NULL_TREE;
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has more than three arguments, we do not have
     interesting builtin functions to vectorize with more than two arguments
     except for fma.  No arguments is also not good.  */
  if (nargs == 0 || nargs > 3)
    return false;

  /* Ignore the argument of IFN_GOMP_SIMD_LANE, it is magic.  */
  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      nargs = 0;
      rhs_type = unsigned_type_node;
    }

  for (i = 0; i < nargs; i++)
    {
      tree opvectype;

      op = gimple_call_arg (stmt, i);

      /* We can only handle calls with arguments of the same type.  */
      if (rhs_type
	  && !types_compatible_p (rhs_type, TREE_TYPE (op)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument types differ.\n");
	  return false;
	}
      if (!rhs_type)
	rhs_type = TREE_TYPE (op);

      if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[i], &opvectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}

      if (!vectype_in)
	vectype_in = opvectype;
      else if (opvectype
	       && opvectype != vectype_in)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "argument vector types differ.\n");
	  return false;
	}
    }
  /* If all arguments are external or constant defs use a vector type with
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  /* FORNOW */
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in == nunits_out / 2)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else if (nunits_out == nunits_in / 2)
    modifier = WIDEN;
  else
    return false;
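  /* E.g. a call taking V2DI arguments and producing a V4SI result is
     NARROW: each vector call consumes two vectors of each argument.
     The WIDEN case (result with half as many elements as the inputs)
     is recognized here but rejected later, since no current target
     implements it.  */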
  /* We only handle functions that do not read or clobber memory.  */
  if (gimple_vuse (stmt))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "function reads from or writes to memory.\n");
      return false;
    }

  /* For now, we only vectorize functions if a target specific builtin
     is available.  TODO -- in some cases, it might be profitable to
     insert the calls for pieces of the vector, in order to be able
     to vectorize other operations in the loop.  */
  fndecl = NULL_TREE;
  internal_fn ifn = IFN_LAST;
  combined_fn cfn = gimple_call_combined_fn (stmt);
  tree callee = gimple_call_fndecl (stmt);

  /* First try using an internal function.  */
  if (cfn != CFN_LAST)
    ifn = vectorizable_internal_function (cfn, callee, vectype_out,
					  vectype_in);

  /* If that fails, try asking for a target-specific built-in function.  */
  if (ifn == IFN_LAST)
    {
      if (cfn != CFN_LAST)
	fndecl = targetm.vectorize.builtin_vectorized_function
	  (cfn, vectype_out, vectype_in);
      else
	fndecl = targetm.vectorize.builtin_md_vectorized_function
	  (callee, vectype_out, vectype_in);
    }

  if (ifn == IFN_LAST && !fndecl)
    {
      if (cfn == CFN_GOMP_SIMD_LANE
	  && !slp_node
	  && loop_vinfo
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	  && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
	  && LOOP_VINFO_LOOP (loop_vinfo)->simduid
	     == SSA_NAME_VAR (gimple_call_arg (stmt, 0)))
	{
	  /* We can handle IFN_GOMP_SIMD_LANE by returning a
	     { 0, 1, 2, ... vf - 1 } vector.  */
	  gcc_assert (nargs == 0);
	}
      else
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "function is not vectorizable.\n");
	  return false;
	}
    }

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
			 "\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype_out);

  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_oprnd0)
		{
		  size_t k;
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs[k] = vec_oprndsk[i];
		    }
		  if (ifn != IFN_LAST)
		    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		vec_oprnd0
		  = vect_get_vec_def_for_operand (op, stmt);
	      else
		{
		  vec_oprnd0 = gimple_call_arg (new_stmt, i);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	    }

	  if (gimple_call_internal_p (stmt)
	      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
	    {
	      tree *v = XALLOCAVEC (tree, nunits_out);
	      int k;
	      for (k = 0; k < nunits_out; ++k)
		v[k] = build_int_cst (unsigned_type_node, j * nunits_out + k);
	      tree cst = build_vector (vectype_out, v);
	      tree new_var
		= vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
	      gimple *init_stmt = gimple_build_assign (new_var, cst);
	      vect_init_vector_1 (stmt, init_stmt, NULL);
	      new_temp = make_ssa_name (vec_dest);
	      new_stmt = gimple_build_assign (new_temp, new_var);
	    }
	  else
	    {
	      if (ifn != IFN_LAST)
		new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	      else
		new_stmt = gimple_build_call_vec (fndecl, vargs);
	      new_temp = make_ssa_name (vec_dest, new_stmt);
	      gimple_call_set_lhs (new_stmt, new_temp);
	    }
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      break;
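
      /* For NARROW calls each result vector is computed from two vectors
	 of each argument, so the operands are pushed in pairs and the SLP
	 defs are consumed two at a time below.  */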
    case NARROW:
      for (j = 0; j < ncopies; ++j)
	{
	  /* Build argument list for the vectorized call.  */
	  if (j == 0)
	    vargs.create (nargs * 2);
	  else
	    vargs.truncate (0);

	  if (slp_node)
	    {
	      auto_vec<vec<tree> > vec_defs (nargs);
	      vec<tree> vec_oprnds0;

	      for (i = 0; i < nargs; i++)
		vargs.quick_push (gimple_call_arg (stmt, i));
	      vect_get_slp_defs (vargs, slp_node, &vec_defs, -1);
	      vec_oprnds0 = vec_defs[0];

	      /* Arguments are ready.  Create the new vector stmt.  */
	      for (i = 0; vec_oprnds0.iterate (i, &vec_oprnd0); i += 2)
		{
		  size_t k;
		  vargs.truncate (0);
		  for (k = 0; k < nargs; k++)
		    {
		      vec<tree> vec_oprndsk = vec_defs[k];
		      vargs.quick_push (vec_oprndsk[i]);
		      vargs.quick_push (vec_oprndsk[i + 1]);
		    }
		  if (ifn != IFN_LAST)
		    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
		  else
		    new_stmt = gimple_build_call_vec (fndecl, vargs);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
		}

	      for (i = 0; i < nargs; i++)
		{
		  vec<tree> vec_oprndsi = vec_defs[i];
		  vec_oprndsi.release ();
		}
	      continue;
	    }

	  for (i = 0; i < nargs; i++)
	    {
	      op = gimple_call_arg (stmt, i);
	      if (j == 0)
		{
		  vec_oprnd0
		    = vect_get_vec_def_for_operand (op, stmt);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}
	      else
		{
		  vec_oprnd1 = gimple_call_arg (new_stmt, 2*i + 1);
		  vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd1);
		  vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
		}

	      vargs.quick_push (vec_oprnd0);
	      vargs.quick_push (vec_oprnd1);
	    }

	  if (ifn != IFN_LAST)
	    new_stmt = gimple_build_call_internal_vec (ifn, vargs);
	  else
	    new_stmt = gimple_build_call_vec (fndecl, vargs);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case WIDEN:
      /* No current target implements this case.  */
      return false;
    }

  vargs.release ();

  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  type = TREE_TYPE (scalar_dest);
  if (is_pattern_stmt_p (stmt_info))
    lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
  else
    lhs = gimple_call_lhs (stmt);

  if (gimple_call_internal_p (stmt)
      && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE)
    {
      /* Replace uses of the lhs of GOMP_SIMD_LANE call outside the loop
	 with vf - 1 rather than 0, that is the last iteration of the
	 vectorized loop.  */
      imm_use_iterator iter;
      use_operand_p use_p;
      gimple *use_stmt;
      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
	{
	  basic_block use_bb = gimple_bb (use_stmt);
	  if (use_bb
	      && !flow_bb_inside_loop_p (LOOP_VINFO_LOOP (loop_vinfo), use_bb))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, build_int_cst (TREE_TYPE (lhs),
					       ncopies * nunits_out - 1));
	      update_stmt (use_stmt);
	    }
	}
    }

  new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, false);

  return true;
}
struct simd_call_arg_info
{
  tree vectype;
  tree op;
  enum vect_def_type dt;
  HOST_WIDE_INT linear_step;
  unsigned int align;
  bool simd_lane_linear;
};
/* Helper function of vectorizable_simd_clone_call.  If OP, an SSA_NAME,
   is linear within simd lane (but not within whole loop), note it in
   ARGINFO.  */

static void
vect_simd_lane_linear (tree op, struct loop *loop,
		       struct simd_call_arg_info *arginfo)
{
  gimple *def_stmt = SSA_NAME_DEF_STMT (op);

  if (!is_gimple_assign (def_stmt)
      || gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR
      || !is_gimple_min_invariant (gimple_assign_rhs1 (def_stmt)))
    return;

  tree base = gimple_assign_rhs1 (def_stmt);
  HOST_WIDE_INT linear_step = 0;
  tree v = gimple_assign_rhs2 (def_stmt);
  while (TREE_CODE (v) == SSA_NAME)
    {
      tree t;
      def_stmt = SSA_NAME_DEF_STMT (v);
      if (is_gimple_assign (def_stmt))
	switch (gimple_assign_rhs_code (def_stmt))
	  {
	  case PLUS_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || TREE_CODE (t) != INTEGER_CST)
	      return;
	    base = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (base), base, t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  case MULT_EXPR:
	    t = gimple_assign_rhs2 (def_stmt);
	    if (linear_step || !tree_fits_shwi_p (t) || integer_zerop (t))
	      return;
	    linear_step = tree_to_shwi (t);
	    v = gimple_assign_rhs1 (def_stmt);
	    continue;
	  CASE_CONVERT:
	    t = gimple_assign_rhs1 (def_stmt);
	    if (TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE
		|| (TYPE_PRECISION (TREE_TYPE (v))
		    < TYPE_PRECISION (TREE_TYPE (t))))
	      return;
	    v = t;
	    continue;
	  default:
	    return;
	  }
      else if (is_gimple_call (def_stmt)
	       && gimple_call_internal_p (def_stmt)
	       && gimple_call_internal_fn (def_stmt) == IFN_GOMP_SIMD_LANE
	       && loop->simduid
	       && TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME
	       && (SSA_NAME_VAR (gimple_call_arg (def_stmt, 0))
		   == loop->simduid))
	{
	  if (!linear_step)
	    linear_step = 1;
	  arginfo->linear_step = linear_step;
	  arginfo->op = base;
	  arginfo->simd_lane_linear = true;
	  return;
	}
      else
	return;
    }
}
/* Function vectorizable_simd_clone_call.

   Check if STMT performs a function call that can be vectorized
   by calling a simd clone of the function.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
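/* For example, a function declared with "#pragma omp declare simd" gets
   vector clones, and a loop call like

     y[i] = foo (x[i]);

   can be vectorized into a call of a clone that takes and returns whole
   vectors, e.g. vy = foo.simdclone.4 (vx) with a simdlen of four.  (The
   clone name here is illustrative; the real symbol follows the vector
   ABI mangling.)  */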
static bool
vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
			      gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op, type;
  tree vec_oprnd0 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt), prev_stmt_info;
  tree vectype;
  unsigned int nunits;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
  tree fndecl, new_temp;
  gimple *def_stmt;
  gimple *new_stmt = NULL;
  int ncopies, j;
  vec<simd_call_arg_info> arginfo = vNULL;
  vec<tree> vargs = vNULL;
  size_t i, nargs;
  tree lhs, rtype, ratype;
  vec<constructor_elt, va_gc> *ret_ctor_elts;

  /* Is STMT a vectorizable call?   */
  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl == NULL_TREE)
    return false;

  struct cgraph_node *node = cgraph_node::get (fndecl);
  if (node == NULL || node->simd_clones == NULL)
    return false;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (gimple_call_lhs (stmt)
      && TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME)
    return false;

  gcc_checking_assert (!stmt_can_throw_internal (stmt));

  vectype = STMT_VINFO_VECTYPE (stmt_info);

  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
    return false;

  /* FORNOW */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    return false;

  /* Process function arguments.  */
  nargs = gimple_call_num_args (stmt);

  /* Bail out if the function has zero arguments.  */
  if (nargs == 0)
    return false;

  arginfo.create (nargs);

  for (i = 0; i < nargs; i++)
    {
      simd_call_arg_info thisarginfo;
      affine_iv iv;

      thisarginfo.linear_step = 0;
      thisarginfo.align = 0;
      thisarginfo.op = NULL_TREE;
      thisarginfo.simd_lane_linear = false;

      op = gimple_call_arg (stmt, i);
      if (!vect_is_simple_use (op, vinfo, &def_stmt, &thisarginfo.dt,
			       &thisarginfo.vectype)
	  || thisarginfo.dt == vect_uninitialized_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  arginfo.release ();
	  return false;
	}

      if (thisarginfo.dt == vect_constant_def
	  || thisarginfo.dt == vect_external_def)
	gcc_assert (thisarginfo.vectype == NULL_TREE);
      else
	gcc_assert (thisarginfo.vectype != NULL_TREE);

      /* For linear arguments, the analyze phase should have saved
	 the base and step in STMT_VINFO_SIMD_CLONE_INFO.  */
      if (i * 3 + 4 <= STMT_VINFO_SIMD_CLONE_INFO (stmt_info).length ()
	  && STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2])
	{
	  gcc_assert (vec_stmt);
	  thisarginfo.linear_step
	    = tree_to_shwi (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2]);
	  thisarginfo.op
	    = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 1];
	  thisarginfo.simd_lane_linear
	    = (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 3]
	       == boolean_true_node);
	  /* If loop has been peeled for alignment, we need to adjust it.  */
	  tree n1 = LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo);
	  tree n2 = LOOP_VINFO_NITERS (loop_vinfo);
	  if (n1 != n2 && !thisarginfo.simd_lane_linear)
	    {
	      tree bias = fold_build2 (MINUS_EXPR, TREE_TYPE (n1), n1, n2);
	      tree step = STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[i * 3 + 2];
	      tree opt = TREE_TYPE (thisarginfo.op);
	      bias = fold_convert (TREE_TYPE (step), bias);
	      bias = fold_build2 (MULT_EXPR, TREE_TYPE (step), bias, step);
	      thisarginfo.op
		= fold_build2 (POINTER_TYPE_P (opt)
			       ? POINTER_PLUS_EXPR : PLUS_EXPR, opt,
			       thisarginfo.op, bias);
	    }
	}
      else if (!vec_stmt
	       && thisarginfo.dt != vect_constant_def
	       && thisarginfo.dt != vect_external_def
	       && loop_vinfo
	       && TREE_CODE (op) == SSA_NAME
	       && simple_iv (loop, loop_containing_stmt (stmt), op,
			     &iv, false)
	       && tree_fits_shwi_p (iv.step))
	{
	  thisarginfo.linear_step = tree_to_shwi (iv.step);
	  thisarginfo.op = iv.base;
	}
      else if ((thisarginfo.dt == vect_constant_def
		|| thisarginfo.dt == vect_external_def)
	       && POINTER_TYPE_P (TREE_TYPE (op)))
	thisarginfo.align = get_pointer_alignment (op) / BITS_PER_UNIT;
      /* Addresses of array elements indexed by GOMP_SIMD_LANE are
	 linear too.  */
      if (POINTER_TYPE_P (TREE_TYPE (op))
	  && !thisarginfo.linear_step
	  && !vec_stmt
	  && thisarginfo.dt != vect_constant_def
	  && thisarginfo.dt != vect_external_def
	  && loop_vinfo
	  && !slp_node
	  && TREE_CODE (op) == SSA_NAME)
	vect_simd_lane_linear (op, loop, &thisarginfo);

      arginfo.quick_push (thisarginfo);
    }
  unsigned int badness = 0;
  struct cgraph_node *bestn = NULL;
  if (STMT_VINFO_SIMD_CLONE_INFO (stmt_info).exists ())
    bestn = cgraph_node::get (STMT_VINFO_SIMD_CLONE_INFO (stmt_info)[0]);
  else
    for (struct cgraph_node *n = node->simd_clones; n != NULL;
	 n = n->simdclone->next_clone)
      {
	unsigned int this_badness = 0;
	if (n->simdclone->simdlen
	    > (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
	    || n->simdclone->nargs != nargs)
	  continue;
	if (n->simdclone->simdlen
	    < (unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo))
	  this_badness += (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo))
			   - exact_log2 (n->simdclone->simdlen)) * 1024;
	if (n->simdclone->inbranch)
	  this_badness += 2048;
	int target_badness = targetm.simd_clone.usable (n);
	if (target_badness < 0)
	  continue;
	this_badness += target_badness * 512;
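	/* The scoring above prefers the widest usable clone: each halving
	   of simdlen below the vectorization factor costs 1024, an
	   inbranch (masked) clone costs an extra 2048, and the target can
	   add its own penalty via the usable hook.  */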
	/* FORNOW: Have to add code to add the mask argument.  */
	if (n->simdclone->inbranch)
	  continue;
	for (i = 0; i < nargs; i++)
	  {
	    switch (n->simdclone->args[i].arg_type)
	      {
	      case SIMD_CLONE_ARG_TYPE_VECTOR:
		if (!useless_type_conversion_p
			(n->simdclone->args[i].orig_type,
			 TREE_TYPE (gimple_call_arg (stmt, i))))
		  i = -1;
		else if (arginfo[i].dt == vect_constant_def
			 || arginfo[i].dt == vect_external_def
			 || arginfo[i].linear_step)
		  this_badness += 64;
		break;
	      case SIMD_CLONE_ARG_TYPE_UNIFORM:
		if (arginfo[i].dt != vect_constant_def
		    && arginfo[i].dt != vect_external_def)
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
		if (arginfo[i].dt == vect_constant_def
		    || arginfo[i].dt == vect_external_def
		    || (arginfo[i].linear_step
			!= n->simdclone->args[i].linear_step))
		  i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	      case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
		/* FORNOW */
		i = -1;
		break;
	      case SIMD_CLONE_ARG_TYPE_MASK:
		gcc_unreachable ();
	      }
	    if (i == (size_t) -1)
	      break;
	    if (n->simdclone->args[i].alignment > arginfo[i].align)
	      {
		i = -1;
		break;
	      }
	    if (arginfo[i].align)
	      this_badness += (exact_log2 (arginfo[i].align)
			       - exact_log2 (n->simdclone->args[i].alignment));
	  }
	if (i == (size_t) -1)
	  continue;
	if (bestn == NULL || this_badness < badness)
	  {
	    bestn = n;
	    badness = this_badness;
	  }
      }
  if (bestn == NULL)
    {
      arginfo.release ();
      return false;
    }

  for (i = 0; i < nargs; i++)
    if ((arginfo[i].dt == vect_constant_def
	 || arginfo[i].dt == vect_external_def)
	&& bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
      {
	arginfo[i].vectype
	  = get_vectype_for_scalar_type (TREE_TYPE (gimple_call_arg (stmt,
								     i)));
	if (arginfo[i].vectype == NULL
	    || (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
		> bestn->simdclone->simdlen))
	  {
	    arginfo.release ();
	    return false;
	  }
      }

  fndecl = bestn->decl;
  nunits = bestn->simdclone->simdlen;
  ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  /* If the function isn't const, only allow it in simd loops where user
     has asserted that at least nunits consecutive iterations can be
     performed using SIMD instructions.  */
  if ((loop == NULL || (unsigned) loop->safelen < nunits)
      && gimple_vuse (stmt))
    {
      arginfo.release ();
      return false;
    }

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (bestn->decl);
      for (i = 0; i < nargs; i++)
	if (bestn->simdclone->args[i].arg_type
	    == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
	  {
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_grow_cleared (i * 3
								      + 1);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (arginfo[i].op);
	    tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op))
		       ? size_type_node : TREE_TYPE (arginfo[i].op);
	    tree ls = build_int_cst (lst, arginfo[i].linear_step);
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (ls);
	    tree sll = arginfo[i].simd_lane_linear
		       ? boolean_true_node : boolean_false_node;
	    STMT_VINFO_SIMD_CLONE_INFO (stmt_info).safe_push (sll);
	  }
      STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_simd_clone_call ===\n");
/*      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL); */
      arginfo.release ();
      return true;
    }
  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");

  /* Handle def.  */
  scalar_dest = gimple_call_lhs (stmt);
  vec_dest = NULL_TREE;
  rtype = NULL_TREE;
  ratype = NULL_TREE;
  if (scalar_dest)
    {
      vec_dest = vect_create_destination_var (scalar_dest, vectype);
      rtype = TREE_TYPE (TREE_TYPE (fndecl));
      if (TREE_CODE (rtype) == ARRAY_TYPE)
	{
	  ratype = rtype;
	  rtype = TREE_TYPE (ratype);
	}
    }

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; ++j)
    {
      /* Build argument list for the vectorized call.  */
      if (j == 0)
	vargs.create (nargs);
      else
	vargs.truncate (0);

      for (i = 0; i < nargs; i++)
	{
	  unsigned int k, l, m, o;
	  tree atype;
	  op = gimple_call_arg (stmt, i);
	  switch (bestn->simdclone->args[i].arg_type)
	    {
	    case SIMD_CLONE_ARG_TYPE_VECTOR:
	      atype = bestn->simdclone->args[i].vector_type;
	      o = nunits / TYPE_VECTOR_SUBPARTS (atype);
	      for (m = j * o; m < (j + 1) * o; m++)
		{
		  if (TYPE_VECTOR_SUBPARTS (atype)
		      < TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))
		    {
		      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (atype));
		      k = (TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)
			   / TYPE_VECTOR_SUBPARTS (atype));
		      gcc_assert ((k & (k - 1)) == 0);
		      if (m == 0)
			vec_oprnd0
			  = vect_get_vec_def_for_operand (op, stmt);
		      else
			{
			  vec_oprnd0 = arginfo[i].op;
			  if ((m & (k - 1)) == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								vec_oprnd0);
			}
		      arginfo[i].op = vec_oprnd0;
		      vec_oprnd0
			= build3 (BIT_FIELD_REF, atype, vec_oprnd0,
				  size_int (prec),
				  bitsize_int ((m & (k - 1)) * prec));
		      new_stmt
			= gimple_build_assign (make_ssa_name (atype),
					       vec_oprnd0);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      vargs.safe_push (gimple_assign_lhs (new_stmt));
		    }
		  else
		    {
		      k = (TYPE_VECTOR_SUBPARTS (atype)
			   / TYPE_VECTOR_SUBPARTS (arginfo[i].vectype));
		      gcc_assert ((k & (k - 1)) == 0);
		      vec<constructor_elt, va_gc> *ctor_elts;
		      if (k != 1)
			vec_alloc (ctor_elts, k);
		      else
			ctor_elts = NULL;
		      for (l = 0; l < k; l++)
			{
			  if (m == 0 && l == 0)
			    vec_oprnd0
			      = vect_get_vec_def_for_operand (op, stmt);
			  else
			    vec_oprnd0
			      = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
								arginfo[i].op);
			  arginfo[i].op = vec_oprnd0;
			  if (k == 1)
			    break;
			  CONSTRUCTOR_APPEND_ELT (ctor_elts, NULL_TREE,
						  vec_oprnd0);
			}
		      if (k == 1)
			vargs.safe_push (vec_oprnd0);
		      else
			{
			  vec_oprnd0 = build_constructor (atype, ctor_elts);
			  new_stmt
			    = gimple_build_assign (make_ssa_name (atype),
						   vec_oprnd0);
			  vect_finish_stmt_generation (stmt, new_stmt, gsi);
			  vargs.safe_push (gimple_assign_lhs (new_stmt));
			}
		    }
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_UNIFORM:
	      vargs.safe_push (op);
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	      if (j == 0)
		{
		  gimple_seq stmts;
		  arginfo[i].op
		    = force_gimple_operand (arginfo[i].op, &stmts, true,
					    NULL_TREE);
		  if (stmts != NULL)
		    {
		      basic_block new_bb;
		      edge pe = loop_preheader_edge (loop);
		      new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
		      gcc_assert (!new_bb);
		    }
		  if (arginfo[i].simd_lane_linear)
		    {
		      vargs.safe_push (arginfo[i].op);
		      break;
		    }
		  tree phi_res = copy_ssa_name (op);
		  gphi *new_phi = create_phi_node (phi_res, loop->header);
		  set_vinfo_for_stmt (new_phi,
				      new_stmt_vec_info (new_phi, loop_vinfo));
		  add_phi_arg (new_phi, arginfo[i].op,
			       loop_preheader_edge (loop), UNKNOWN_LOCATION);
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       ncopies * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  tree phi_arg = copy_ssa_name (op);
		  new_stmt
		    = gimple_build_assign (phi_arg, code, phi_res, tcst);
		  gimple_stmt_iterator si = gsi_after_labels (loop->header);
		  gsi_insert_after (&si, new_stmt, GSI_NEW_STMT);
		  set_vinfo_for_stmt (new_stmt,
				      new_stmt_vec_info (new_stmt,
							 loop_vinfo));
		  add_phi_arg (new_phi, phi_arg, loop_latch_edge (loop),
			       UNKNOWN_LOCATION);
		  arginfo[i].op = phi_res;
		  vargs.safe_push (phi_res);
		}
	      else
		{
		  enum tree_code code
		    = POINTER_TYPE_P (TREE_TYPE (op))
		      ? POINTER_PLUS_EXPR : PLUS_EXPR;
		  tree type = POINTER_TYPE_P (TREE_TYPE (op))
			      ? sizetype : TREE_TYPE (op);
		  widest_int cst
		    = wi::mul (bestn->simdclone->args[i].linear_step,
			       j * nunits);
		  tree tcst = wide_int_to_tree (type, cst);
		  new_temp = make_ssa_name (TREE_TYPE (op));
		  new_stmt = gimple_build_assign (new_temp, code,
						  arginfo[i].op, tcst);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  vargs.safe_push (new_temp);
		}
	      break;
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	    case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	    default:
	      gcc_unreachable ();
	    }
	}
      new_stmt = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype || TYPE_VECTOR_SUBPARTS (rtype) == nunits);
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (TYPE_VECTOR_SUBPARTS (vectype)
		   == TYPE_VECTOR_SUBPARTS (rtype))
	    new_temp = make_ssa_name (vec_dest, new_stmt);
	  else
	    new_temp = make_ssa_name (rtype, new_stmt);
	  gimple_call_set_lhs (new_stmt, new_temp);
	}
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (vec_dest)
	{
	  if (TYPE_VECTOR_SUBPARTS (vectype) < nunits)
	    {
	      unsigned int k, l;
	      unsigned int prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      k = nunits / TYPE_VECTOR_SUBPARTS (vectype);
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
		  tree t;
		  if (ratype)
		    {
		      t = build_fold_addr_expr (new_temp);
		      t = build2 (MEM_REF, vectype, t,
				  build_int_cst (TREE_TYPE (t),
						 l * prec / BITS_PER_UNIT));
		    }
		  else
		    t = build3 (BIT_FIELD_REF, vectype, new_temp,
				size_int (prec), bitsize_int (l * prec));
		  new_stmt
		    = gimple_build_assign (make_ssa_name (vectype), t);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		  if (j == 0 && l == 0)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	      if (ratype)
		{
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      continue;
	    }
	  else if (TYPE_VECTOR_SUBPARTS (vectype) > nunits)
	    {
	      unsigned int k = (TYPE_VECTOR_SUBPARTS (vectype)
				/ TYPE_VECTOR_SUBPARTS (rtype));
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
	      if (ratype)
		{
		  unsigned int m, o = nunits / TYPE_VECTOR_SUBPARTS (rtype);
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
					 size_int (m), NULL_TREE, NULL_TREE);
		      new_stmt
			= gimple_build_assign (make_ssa_name (rtype), tem);
		      vect_finish_stmt_generation (stmt, new_stmt, gsi);
		      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
					      gimple_assign_lhs (new_stmt));
		    }
		  tree clobber = build_constructor (ratype, NULL);
		  TREE_THIS_VOLATILE (clobber) = 1;
		  new_stmt = gimple_build_assign (new_temp, clobber);
		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
	      if ((j & (k - 1)) != k - 1)
		continue;
	      vec_oprnd0 = build_constructor (vectype, ret_ctor_elts);
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if ((unsigned) j == k - 1)
		STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

	      prev_stmt_info = vinfo_for_stmt (new_stmt);
	      continue;
	    }
	  else if (ratype)
	    {
	      tree t = build_fold_addr_expr (new_temp);
	      t = build2 (MEM_REF, vectype, t,
			  build_int_cst (TREE_TYPE (t), 0));
	      new_stmt
		= gimple_build_assign (make_ssa_name (vec_dest), t);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      tree clobber = build_constructor (ratype, NULL);
	      TREE_THIS_VOLATILE (clobber) = 1;
	      vect_finish_stmt_generation (stmt,
					   gimple_build_assign (new_temp,
								clobber), gsi);
	    }
	}

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vargs.release ();
  /* The call in STMT might prevent it from being removed in dce.
     We however cannot remove it here, due to the way the ssa name
     it defines is mapped to the new definition.  So just replace
     rhs of the statement with something harmless.  */

  if (slp_node)
    return true;

  if (scalar_dest)
    {
      type = TREE_TYPE (scalar_dest);
      if (is_pattern_stmt_p (stmt_info))
	lhs = gimple_call_lhs (STMT_VINFO_RELATED_STMT (stmt_info));
      else
	lhs = gimple_call_lhs (stmt);
      new_stmt = gimple_build_assign (lhs, build_zero_cst (type));
    }
  else
    new_stmt = gimple_build_nop ();
  set_vinfo_for_stmt (new_stmt, stmt_info);
  set_vinfo_for_stmt (stmt, NULL);
  STMT_VINFO_STMT (stmt_info) = new_stmt;
  gsi_replace (gsi, new_stmt, true);
  unlink_stmt_vdef (stmt);

  return true;
}
/* Function vect_gen_widened_results_half

   Create a vector stmt whose code, type, number of arguments, and result
   variable are CODE, OP_TYPE, and VEC_DEST, and its arguments are
   VEC_OPRND0 and VEC_OPRND1.  The new vector stmt is to be inserted at BSI.
   In the case that CODE is a CALL_EXPR, this means that a call to DECL
   needs to be created (DECL is a function-decl of a target-builtin).
   STMT is the original scalar stmt that we are vectorizing.  */
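/* E.g. a widening multiply of two V8HI operands is emitted as two stmts,
   one computing the low half of the V4SI result and one the high half;
   this helper emits a single such half, either as an assignment with CODE
   or, for CALL_EXPR, as a call to DECL.  (The concrete modes are only
   illustrative.)  */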
static gimple *
vect_gen_widened_results_half (enum tree_code code,
			       tree decl,
			       tree vec_oprnd0, tree vec_oprnd1, int op_type,
			       tree vec_dest, gimple_stmt_iterator *gsi,
			       gimple *stmt)
{
  gimple *new_stmt;
  tree new_temp;

  /* Generate half of the widened result:  */
  if (code == CALL_EXPR)
    {
      /* Target specific support  */
      if (op_type == binary_op)
	new_stmt = gimple_build_call (decl, 2, vec_oprnd0, vec_oprnd1);
      else
	new_stmt = gimple_build_call (decl, 1, vec_oprnd0);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_call_set_lhs (new_stmt, new_temp);
    }
  else
    {
      /* Generic support */
      gcc_assert (op_type == TREE_CODE_LENGTH (code));
      if (op_type != binary_op)
	vec_oprnd1 = NULL;
      new_stmt = gimple_build_assign (vec_dest, code, vec_oprnd0, vec_oprnd1);
      new_temp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_temp);
    }
  vect_finish_stmt_generation (stmt, new_stmt, gsi);

  return new_stmt;
}
/* Get vectorized definitions for loop-based vectorization.  For the first
   operand we call vect_get_vec_def_for_operand() (with OPRND containing
   scalar operand), and for the rest we get a copy with
   vect_get_vec_def_for_stmt_copy() using the previous vector definition
   (stored in OPRND).  See vect_get_vec_def_for_stmt_copy() for details.
   The vectors are collected into VEC_OPRNDS.  */

static void
vect_get_loop_based_defs (tree *oprnd, gimple *stmt, enum vect_def_type dt,
			  vec<tree> *vec_oprnds, int multi_step_cvt)
{
  tree vec_oprnd;

  /* Get first vector operand.  */
  /* All the vector operands except the very first one (that is scalar oprnd)
     are stmt copies.  */
  if (TREE_CODE (TREE_TYPE (*oprnd)) != VECTOR_TYPE)
    vec_oprnd = vect_get_vec_def_for_operand (*oprnd, stmt);
  else
    vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, *oprnd);

  vec_oprnds->quick_push (vec_oprnd);

  /* Get second vector operand.  */
  vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
  vec_oprnds->quick_push (vec_oprnd);

  *oprnd = vec_oprnd;

  /* For conversion in multiple steps, continue to get operands
     recursively.  */
  if (multi_step_cvt)
    vect_get_loop_based_defs (oprnd, stmt, dt, vec_oprnds, multi_step_cvt - 1);
}
/* Create vectorized demotion statements for vector operands from VEC_OPRNDS.
   For multi-step conversions store the resulting vectors and call the function
   recursively.  */

static void
vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
				       int multi_step_cvt, gimple *stmt,
				       vec<tree> vec_dsts,
				       gimple_stmt_iterator *gsi,
				       slp_tree slp_node, enum tree_code code,
				       stmt_vec_info *prev_stmt_info)
{
  unsigned int i;
  tree vop0, vop1, new_tmp, vec_dest;
  gimple *new_stmt;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  vec_dest = vec_dsts.pop ();

  for (i = 0; i < vec_oprnds->length (); i += 2)
    {
      /* Create demotion operation.  */
      vop0 = (*vec_oprnds)[i];
      vop1 = (*vec_oprnds)[i + 1];
      new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      vect_finish_stmt_generation (stmt, new_stmt, gsi);

      if (multi_step_cvt)
	/* Store the resulting vector for next recursive call.  */
	(*vec_oprnds)[i/2] = new_tmp;
      else
	{
	  /* This is the last step of the conversion sequence.  Store the
	     vectors in SLP_NODE or in vector info of the scalar statement
	     (or in STMT_VINFO_RELATED_STMT chain).  */
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	  else
	    {
	      if (!*prev_stmt_info)
		STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
	      else
		STMT_VINFO_RELATED_STMT (*prev_stmt_info) = new_stmt;

	      *prev_stmt_info = vinfo_for_stmt (new_stmt);
	    }
	}
    }

  /* For multi-step demotion operations we first generate demotion operations
     from the source type to the intermediate types, and then combine the
     results (stored in VEC_OPRNDS) in demotion operation to the destination
     type.  */
  if (multi_step_cvt)
    {
      /* At each level of recursion we have half of the operands we had at the
	 previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
					     stmt, vec_dsts, gsi, slp_node,
					     VEC_PACK_TRUNC_EXPR,
					     prev_stmt_info);
    }

  vec_dsts.quick_push (vec_dest);
}
/* Create vectorized promotion statements for vector operands from VEC_OPRNDS0
   and VEC_OPRNDS1 (for binary operations).  For multi-step conversions store
   the resulting vectors and call the function recursively.  */
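/* E.g. promoting V16QI operands to V4SI goes V16QI -> 2x V8HI -> 4x V4SI:
   each invocation emits the lo/hi halves for one step and leaves the
   results in VEC_OPRNDS0 for the next step.  (The vector modes here are
   only illustrative.)  */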
static void
vect_create_vectorized_promotion_stmts (vec<tree> *vec_oprnds0,
					vec<tree> *vec_oprnds1,
					gimple *stmt, tree vec_dest,
					gimple_stmt_iterator *gsi,
					enum tree_code code1,
					enum tree_code code2, tree decl1,
					tree decl2, int op_type)
{
  int i;
  tree vop0, vop1, new_tmp1, new_tmp2;
  gimple *new_stmt1, *new_stmt2;
  vec<tree> vec_tmp = vNULL;

  vec_tmp.create (vec_oprnds0->length () * 2);
  FOR_EACH_VEC_ELT (*vec_oprnds0, i, vop0)
    {
      if (op_type == binary_op)
	vop1 = (*vec_oprnds1)[i];
      else
	vop1 = NULL_TREE;

      /* Generate the two halves of promotion operation.  */
      new_stmt1 = vect_gen_widened_results_half (code1, decl1, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      new_stmt2 = vect_gen_widened_results_half (code2, decl2, vop0, vop1,
						 op_type, vec_dest, gsi, stmt);
      if (is_gimple_call (new_stmt1))
	{
	  new_tmp1 = gimple_call_lhs (new_stmt1);
	  new_tmp2 = gimple_call_lhs (new_stmt2);
	}
      else
	{
	  new_tmp1 = gimple_assign_lhs (new_stmt1);
	  new_tmp2 = gimple_assign_lhs (new_stmt2);
	}

      /* Store the results for the next step.  */
      vec_tmp.quick_push (new_tmp1);
      vec_tmp.quick_push (new_tmp2);
    }

  vec_oprnds0->release ();
  *vec_oprnds0 = vec_tmp;
}
/* Check if STMT performs a conversion operation, that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at GSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
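/* E.g. a float -> double conversion over a vectorization factor of four
   widens one V4SF operand into two V2DF results (modifier WIDEN), while
   double -> float packs two V2DF operands into one V4SF result (NARROW);
   an int -> float conversion between equal-width elements is a plain
   NONE conversion.  The vector modes are only illustrative.  */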
static bool
vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE;
  tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code, code1 = ERROR_MARK, code2 = ERROR_MARK;
  enum tree_code codecvt1 = ERROR_MARK, codecvt2 = ERROR_MARK;
  tree decl1 = NULL_TREE, decl2 = NULL_TREE;
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out, vectype_in;
  int ncopies, i, j;
  tree lhs_type, rhs_type;
  enum { NARROW, NONE, WIDEN } modifier;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int multi_step_cvt = 0;
  vec<tree> vec_dsts = vNULL;
  vec<tree> interm_types = vNULL;
  tree last_oprnd, intermediate_type, cvt_type = NULL_TREE;
  int op_type;
  machine_mode rhs_mode;
  unsigned short fltsz;

  /* Is STMT a vectorizable conversion?   */

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (!CONVERT_EXPR_CODE_P (code)
      && code != FIX_TRUNC_EXPR
      && code != FLOAT_EXPR
      && code != WIDEN_MULT_EXPR
      && code != WIDEN_LSHIFT_EXPR)
    return false;

  op_type = TREE_CODE_LENGTH (code);

  /* Check types of lhs and rhs.  */
  scalar_dest = gimple_assign_lhs (stmt);
  lhs_type = TREE_TYPE (scalar_dest);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  op0 = gimple_assign_rhs1 (stmt);
  rhs_type = TREE_TYPE (op0);

  if ((code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
      && !((INTEGRAL_TYPE_P (lhs_type)
	    && INTEGRAL_TYPE_P (rhs_type))
	   || (SCALAR_FLOAT_TYPE_P (lhs_type)
	       && SCALAR_FLOAT_TYPE_P (rhs_type))))
    return false;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && ((INTEGRAL_TYPE_P (lhs_type)
	   && (TYPE_PRECISION (lhs_type)
	       != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
	  || (INTEGRAL_TYPE_P (rhs_type)
	      && (TYPE_PRECISION (rhs_type)
		  != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "type conversion to/from bit-precision unsupported."
			 "\n");
      return false;
    }

  /* Check the operands of the operation.  */
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "use not simple.\n");
      return false;
    }
  if (op_type == binary_op)
    {
      bool ok;

      op1 = gimple_assign_rhs2 (stmt);
      gcc_assert (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR);
      /* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
	 OP1.  */
      if (CONSTANT_CLASS_P (op0))
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &vectype_in);
      else
	ok = vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]);

      if (!ok)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "use not simple.\n");
	  return false;
	}
    }

  /* If op0 is an external or constant defs use a vector type of
     the same size as the output vector type.  */
  if (!vectype_in)
    vectype_in = get_same_sized_vectype (rhs_type, vectype_out);
  if (vec_stmt)
    gcc_assert (vectype_in);
  if (!vectype_in)
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "no vectype for scalar type ");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  if (VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && !VECTOR_BOOLEAN_TYPE_P (vectype_in))
    {
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "can't convert between boolean and non "
			   "boolean vectors");
	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
	}
      return false;
    }

  nunits_in = TYPE_VECTOR_SUBPARTS (vectype_in);
  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  if (nunits_in < nunits_out)
    modifier = NARROW;
  else if (nunits_out == nunits_in)
    modifier = NONE;
  else
    modifier = WIDEN;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else if (modifier == NARROW)
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_out;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  /* Sanity check: make sure that at least one copy of the vectorized stmt
     needs to be generated.  */
  gcc_assert (ncopies >= 1);
  /* Supportable by target?  */
  switch (modifier)
    {
    case NONE:
      if (code != FIX_TRUNC_EXPR && code != FLOAT_EXPR)
	return false;
      if (supportable_convert_operation (code, vectype_out, vectype_in,
					 &decl1, &code1))
	break;
      /* FALLTHRU */
    unsupported:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "conversion not supported by target.\n");
      return false;

    case WIDEN:
      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
					  &code1, &code2, &multi_step_cvt,
					  &interm_types))
	{
	  /* Binary widening operation can only be supported directly by the
	     architecture.  */
	  gcc_assert (!(multi_step_cvt && op_type == binary_op));
	  break;
	}

      if (code != FLOAT_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      <= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      fltsz = GET_MODE_SIZE (TYPE_MODE (lhs_type));
      for (rhs_mode = GET_MODE_2XWIDER_MODE (TYPE_MODE (rhs_type));
	   rhs_mode != VOIDmode && GET_MODE_SIZE (rhs_mode) <= fltsz;
	   rhs_mode = GET_MODE_2XWIDER_MODE (rhs_mode))
	{
	  cvt_type
	    = build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
	  cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
	  if (cvt_type == NULL_TREE)
	    goto unsupported;

	  if (GET_MODE_SIZE (rhs_mode) == fltsz)
	    {
	      if (!supportable_convert_operation (code, vectype_out,
						  cvt_type, &decl1, &codecvt1))
		goto unsupported;
	    }
	  else if (!supportable_widening_operation (code, stmt, vectype_out,
						    cvt_type, &codecvt1,
						    &codecvt2, &multi_step_cvt,
						    &interm_types))
	    continue;
	  else
	    gcc_assert (multi_step_cvt == 0);

	  if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
					      vectype_in, &code1, &code2,
					      &multi_step_cvt, &interm_types))
	    break;
	}

      if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
	goto unsupported;

      if (GET_MODE_SIZE (rhs_mode) == fltsz)
	codecvt2 = ERROR_MARK;
      else
	{
	  multi_step_cvt++;
	  interm_types.safe_push (cvt_type);
	  cvt_type = NULL_TREE;
	}
      break;

    case NARROW:
      gcc_assert (op_type == unary_op);
      if (supportable_narrowing_operation (code, vectype_out, vectype_in,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;

      if (code != FIX_TRUNC_EXPR
	  || (GET_MODE_SIZE (TYPE_MODE (lhs_type))
	      >= GET_MODE_SIZE (TYPE_MODE (rhs_type))))
	goto unsupported;

      rhs_mode = TYPE_MODE (rhs_type);
      cvt_type
	= build_nonstandard_integer_type (GET_MODE_BITSIZE (rhs_mode), 0);
      cvt_type = get_same_sized_vectype (cvt_type, vectype_in);
      if (cvt_type == NULL_TREE)
	goto unsupported;
      if (!supportable_convert_operation (code, cvt_type, vectype_in,
					  &decl1, &codecvt1))
	goto unsupported;
      if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
					   &code1, &multi_step_cvt,
					   &interm_types))
	break;
      goto unsupported;

    default:
      gcc_unreachable ();
    }
  if (!vec_stmt)		/* transformation not required.  */
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "=== vectorizable_conversion ===\n");
      if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
	  vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
	}
      else if (modifier == NARROW)
	{
	  STMT_VINFO_TYPE (stmt_info) = type_demotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      else
	{
	  STMT_VINFO_TYPE (stmt_info) = type_promotion_vec_info_type;
	  vect_model_promotion_demotion_cost (stmt_info, dt, multi_step_cvt);
	}
      interm_types.release ();
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
		     "transform conversion. ncopies = %d.\n", ncopies);
  if (op_type == binary_op)
    {
      if (CONSTANT_CLASS_P (op0))
	op0 = fold_convert (TREE_TYPE (op1), op0);
      else if (CONSTANT_CLASS_P (op1))
	op1 = fold_convert (TREE_TYPE (op0), op1);
    }

  /* In case of multi-step conversion, we first generate conversion operations
     to the intermediate types, and then from that types to the final one.
     We create vector destinations for the intermediate type (TYPES) received
     from supportable_*_operation, and store them in the correct order
     for future use in vect_create_vectorized_*_stmts ().  */
  vec_dsts.create (multi_step_cvt + 1);
  vec_dest = vect_create_destination_var (scalar_dest,
					  (cvt_type && modifier == WIDEN)
					  ? cvt_type : vectype_out);
  vec_dsts.quick_push (vec_dest);

  if (multi_step_cvt)
    {
      for (i = interm_types.length () - 1;
	   interm_types.iterate (i, &intermediate_type); i--)
	{
	  vec_dest = vect_create_destination_var (scalar_dest,
						  intermediate_type);
	  vec_dsts.quick_push (vec_dest);
	}
    }

  if (cvt_type)
    vec_dest = vect_create_destination_var (scalar_dest,
					    modifier == WIDEN
					    ? vectype_out : cvt_type);

  if (!slp_node)
    {
      if (modifier == WIDEN)
	{
	  vec_oprnds0.create (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1);
	  if (op_type == binary_op)
	    vec_oprnds1.create (1);
	}
      else if (modifier == NARROW)
	vec_oprnds0.create (
		   2 * (multi_step_cvt ? vect_pow2 (multi_step_cvt) : 1));
    }
  else if (code == WIDEN_LSHIFT_EXPR)
    vec_oprnds1.create (slp_node->vec_stmts_size);

  last_oprnd = op0;
  prev_stmt_info = NULL;
  switch (modifier)
    {
    case NONE:
      for (j = 0; j < ncopies; j++)
	{
	  if (j == 0)
	    vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node,
			       -1);
	  else
	    vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      /* Arguments are ready, create the new vector stmt.  */
	      if (code1 == CALL_EXPR)
		{
		  new_stmt = gimple_build_call (decl1, 1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_call_set_lhs (new_stmt, new_temp);
		}
	      else
		{
		  gcc_assert (TREE_CODE_LENGTH (code1) == unary_op);
		  new_stmt = gimple_build_assign (vec_dest, code1, vop0);
		  new_temp = make_ssa_name (vec_dest, new_stmt);
		  gimple_assign_set_lhs (new_stmt, new_temp);
		}

	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}
      break;
    case WIDEN:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (j == 0)
	    {
	      if (slp_node)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    {
		      unsigned int k;

		      vec_oprnd1 = op1;
		      /* Store vec_oprnd1 for every vector stmt to be created
			 for SLP_NODE.  We check during the analysis that all
			 the shift arguments are the same.  */
		      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
			vec_oprnds1.quick_push (vec_oprnd1);

		      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0,
					 NULL, slp_node, -1);
		    }
		  else
		    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
				       &vec_oprnds1, slp_node, -1);
		}
	      else
		{
		  vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
		  vec_oprnds0.quick_push (vec_oprnd0);
		  if (op_type == binary_op)
		    {
		      if (code == WIDEN_LSHIFT_EXPR)
			vec_oprnd1 = op1;
		      else
			vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
		      vec_oprnds1.quick_push (vec_oprnd1);
		    }
		}
	    }
	  else
	    {
	      vec_oprnd0 = vect_get_vec_def_for_stmt_copy (dt[0], vec_oprnd0);
	      vec_oprnds0.truncate (0);
	      vec_oprnds0.quick_push (vec_oprnd0);
	      if (op_type == binary_op)
		{
		  if (code == WIDEN_LSHIFT_EXPR)
		    vec_oprnd1 = op1;
		  else
		    vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1],
								 vec_oprnd1);
		  vec_oprnds1.truncate (0);
		  vec_oprnds1.quick_push (vec_oprnd1);
		}
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  for (i = multi_step_cvt; i >= 0; i--)
	    {
	      tree this_dest = vec_dsts[i];
	      enum tree_code c1 = code1, c2 = code2;
	      if (i == 0 && codecvt2 != ERROR_MARK)
		{
		  c1 = codecvt1;
		  c2 = codecvt2;
		}
	      vect_create_vectorized_promotion_stmts (&vec_oprnds0,
						      &vec_oprnds1,
						      stmt, this_dest, gsi,
						      c1, c2, decl1, decl2,
						      op_type);
	    }

	  FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	    {
	      if (cvt_type)
		{
		  if (codecvt1 == CALL_EXPR)
		    {
		      new_stmt = gimple_build_call (decl1, 1, vop0);
		      new_temp = make_ssa_name (vec_dest, new_stmt);
		      gimple_call_set_lhs (new_stmt, new_temp);
		    }
		  else
		    {
		      gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		      new_temp = make_ssa_name (vec_dest);
		      new_stmt = gimple_build_assign (new_temp, codecvt1,
						      vop0);
		    }

		  vect_finish_stmt_generation (stmt, new_stmt, gsi);
		}
	      else
		new_stmt = SSA_NAME_DEF_STMT (vop0);

	      if (slp_node)
		SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	      else
		{
		  if (!prev_stmt_info)
		    STMT_VINFO_VEC_STMT (stmt_info) = new_stmt;
		  else
		    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
		  prev_stmt_info = vinfo_for_stmt (new_stmt);
		}
	    }
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    case NARROW:
      /* In case the vectorization factor (VF) is bigger than the number
	 of elements that we can fit in a vectype (nunits), we have to
	 generate more than one vector stmt - i.e - we need to "unroll"
	 the vector stmt by a factor VF/nunits.  */
      for (j = 0; j < ncopies; j++)
	{
	  /* Handle uses.  */
	  if (slp_node)
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  else
	    {
	      vec_oprnds0.truncate (0);
	      vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
					vect_pow2 (multi_step_cvt) - 1);
	    }

	  /* Arguments are ready.  Create the new vector stmts.  */
	  if (cvt_type)
	    FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
	      {
		if (codecvt1 == CALL_EXPR)
		  {
		    new_stmt = gimple_build_call (decl1, 1, vop0);
		    new_temp = make_ssa_name (vec_dest, new_stmt);
		    gimple_call_set_lhs (new_stmt, new_temp);
		  }
		else
		  {
		    gcc_assert (TREE_CODE_LENGTH (codecvt1) == unary_op);
		    new_temp = make_ssa_name (vec_dest);
		    new_stmt = gimple_build_assign (new_temp, codecvt1,
						    vop0);
		  }

		vect_finish_stmt_generation (stmt, new_stmt, gsi);
		vec_oprnds0[i] = new_temp;
	      }

	  vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
						 stmt, vec_dsts, gsi,
						 slp_node, code1,
						 &prev_stmt_info);
	}

      *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
      break;
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_dsts.release ();
  interm_types.release ();

  return true;
}
/* Function vectorizable_assignment.

   Check if STMT performs an assignment (copy) that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
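/* E.g. a plain copy or a same-width conversion such as

     a = (int) b;   (with b an unsigned int)

   is vectorized as a vector copy, with a VIEW_CONVERT_EXPR wrapped around
   the operand when the source and destination vector types differ only
   in sign or representation.  */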
static bool
vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
			 gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  tree new_temp;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  int ncopies;
  int i, j;
  vec<tree> vec_oprnds = vNULL;
  tree vop;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info = NULL;
  enum tree_code code;
  tree vectype_in;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable assignment?  */
  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);
  if (gimple_assign_single_p (stmt)
      || code == PAREN_EXPR
      || CONVERT_EXPR_CODE_P (code))
    op = gimple_assign_rhs1 (stmt);
  else
    return false;

  if (code == VIEW_CONVERT_EXPR)
    op = TREE_OPERAND (op, 0);

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt[0], &vectype_in))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  /* We can handle NOP_EXPR conversions that do not change the number
     of elements or the vector size.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && (!vectype_in
	  || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
	  || (GET_MODE_SIZE (TYPE_MODE (vectype))
	      != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
    return false;

  /* We do not handle bit-precision changes.  */
  if ((CONVERT_EXPR_CODE_P (code)
       || code == VIEW_CONVERT_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
      && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	   != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
	  || ((TYPE_PRECISION (TREE_TYPE (op))
	       != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
      /* But a conversion that does not change the bit-pattern is ok.  */
      && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
	    > TYPE_PRECISION (TREE_TYPE (op)))
	   && TYPE_UNSIGNED (TREE_TYPE (op))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "type conversion to/from bit-precision "
                         "unsupported.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_assignment ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */
  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle use.  */
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node, -1);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);

      /* Arguments are ready. create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds, i, vop)
	{
	  if (CONVERT_EXPR_CODE_P (code)
	      || code == VIEW_CONVERT_EXPR)
	    vop = build1 (VIEW_CONVERT_EXPR, vectype, vop);
	  new_stmt = gimple_build_assign (vec_dest, vop);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	  if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
	}

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds.release ();
  return true;
}
/* Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
   either as shift by a scalar or by a vector.  */
bool
vect_supportable_shift (enum tree_code code, tree scalar_type)
{
  machine_mode vec_mode;
  optab optab;
  int icode;
  tree vectype;

  vectype = get_vectype_for_scalar_type (scalar_type);
  if (!vectype)
    return false;

  optab = optab_for_tree_code (code, vectype, optab_scalar);
  if (!optab
      || optab_handler (optab, TYPE_MODE (vectype)) == CODE_FOR_nothing)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (!optab
	  || (optab_handler (optab, TYPE_MODE (vectype))
	      == CODE_FOR_nothing))
	return false;
    }

  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    return false;

  return true;
}
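/* For example, a pattern-recognition caller (such as the division by
   power-of-two pattern in tree-vect-patterns.c) can check up front
   whether a right shift of the scalar type is vectorizable at all
   before building a pattern around it:

     if (!vect_supportable_shift (RSHIFT_EXPR, itype))
       return NULL;  */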
/* Function vectorizable_shift.

   Check if STMT performs a shift operation that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
                    gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL;
  tree vec_oprnd1 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  optab optab;
  int icode;
  machine_mode optab_op2_mode;
  gimple *def_stmt;
  enum vect_def_type dt[2] = {vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  tree op1_vectype;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  tree vop0, vop1;
  unsigned int k;
  bool scalar_shift_arg = true;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (!(code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
	|| code == RROTATE_EXPR))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);
  if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
      != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision shifts not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no vectype for scalar type\n");
      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  op1 = gimple_assign_rhs2 (stmt);
  if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1], &op1_vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);
  /* Determine whether the shift amount is a vector, or scalar.  If the
     shift/rotate amount is a vector, use the vector/vector shift optabs.  */

  if ((dt[1] == vect_internal_def
       || dt[1] == vect_induction_def)
      && !slp_node)
    scalar_shift_arg = false;
  else if (dt[1] == vect_constant_def
	   || dt[1] == vect_external_def
	   || dt[1] == vect_internal_def)
    {
      /* In SLP, need to check whether the shift count is the same,
	 in loops if it is a constant or invariant, it is always
	 a scalar shift.  */
      if (slp_node)
	{
	  vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
	  gimple *slpstmt;

	  FOR_EACH_VEC_ELT (stmts, k, slpstmt)
	    if (!operand_equal_p (gimple_assign_rhs2 (slpstmt), op1, 0))
	      scalar_shift_arg = false;
	}
    }
  else
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "operand mode requires invariant argument.\n");
      return false;
    }
  /* Vector shifted by vector.  */
  if (!scalar_shift_arg)
    {
      optab = optab_for_tree_code (code, vectype, optab_vector);
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "vector/vector shift/rotate found.\n");

      if (!op1_vectype)
	op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
      if (op1_vectype == NULL_TREE
	  || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "unusable type for last operand in"
                             " vector/vector shift/rotate.\n");
	  return false;
	}
    }
  /* See if the machine has a vector shifted by scalar insn and if not
     then see if it has a vector shifted by vector insn.  */
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_scalar);
      if (optab
          && optab_handler (optab, TYPE_MODE (vectype)) != CODE_FOR_nothing)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location,
                             "vector/scalar shift/rotate found.\n");
        }
      else
        {
          optab = optab_for_tree_code (code, vectype, optab_vector);
          if (optab
              && (optab_handler (optab, TYPE_MODE (vectype))
		  != CODE_FOR_nothing))
            {
	      scalar_shift_arg = false;

              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location,
                                 "vector/vector shift/rotate found.\n");

              /* Unlike the other binary operators, shifts/rotates have
                 the rhs being int, instead of the same type as the lhs,
                 so make sure the scalar is the right type if we are
		 dealing with vectors of long long/long/short/char.  */
              if (dt[1] == vect_constant_def)
                op1 = fold_convert (TREE_TYPE (vectype), op1);
	      else if (!useless_type_conversion_p (TREE_TYPE (vectype),
						   TREE_TYPE (op1)))
		{
		  if (slp_node
		      && TYPE_MODE (TREE_TYPE (vectype))
			 != TYPE_MODE (TREE_TYPE (op1)))
		    {
                      if (dump_enabled_p ())
                        dump_printf_loc (MSG_MISSED_OPTIMIZATION,
					 vect_location,
                                         "unusable type for last operand in"
                                         " vector/vector shift/rotate.\n");
		      return false;
		    }
		  if (vec_stmt && !slp_node)
		    {
		      op1 = fold_convert (TREE_TYPE (vectype), op1);
		      op1 = vect_init_vector (stmt, op1,
					      TREE_TYPE (vectype), NULL);
		    }
		}
            }
        }
    }
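  /* For example, for a V2DImode shift the count 'n' is typically a
     32-bit int; before it can serve as the second operand of a
     vector/vector shift it is converted to the element type and
     broadcast, roughly

       n.1 = (long long) n;
       vect_cst = {n.1, n.1};

     which is what the fold_convert/vect_init_vector calls above emit.  */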
  /* Supportable by target?  */
  if (!optab)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "no optab.\n");
      return false;
    }
  vec_mode = TYPE_MODE (vectype);
  icode = (int) optab_handler (optab, vec_mode);
  if (icode == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
          || (vf < vect_min_worthwhile_factor (code)
              && !vec_stmt))
        return false;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && vf < vect_min_worthwhile_factor (code)
      && !vec_stmt)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_shift ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }
  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
        {
          if (scalar_shift_arg)
            {
              /* Vector shl and shr insn patterns can be defined with scalar
                 operand 2 (shift operand).  In this case, use constant or loop
                 invariant op1 directly, without extending it to vector mode
                 first.  */
              optab_op2_mode = insn_data[icode].operand[2].mode;
              if (!VECTOR_MODE_P (optab_op2_mode))
                {
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_NOTE, vect_location,
                                     "operand 1 using scalar mode.\n");
                  vec_oprnd1 = op1;
                  vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
                  vec_oprnds1.quick_push (vec_oprnd1);
                  if (slp_node)
                    {
                      /* Store vec_oprnd1 for every vector stmt to be created
                         for SLP_NODE.  We check during the analysis that all
                         the shift arguments are the same.
                         TODO: Allow different constants for different vector
                         stmts generated for an SLP instance.  */
                      for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                        vec_oprnds1.quick_push (vec_oprnd1);
                    }
                }
            }

          /* vec_oprnd1 is available if operand 1 should be of a scalar-type
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
            vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
                               slp_node, -1);
          else
            vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
                               slp_node, -1);
        }
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
          vop1 = vec_oprnds1[i];
          new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
/* Function vectorizable_operation.

   Check if STMT performs a binary, unary or ternary operation that can
   be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
			gimple **vec_stmt, slp_tree slp_node)
{
  tree vec_dest;
  tree scalar_dest;
  tree op0, op1 = NULL_TREE, op2 = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum tree_code code;
  machine_mode vec_mode;
  tree new_temp;
  int op_type;
  optab optab;
  bool target_support_p;
  gimple *def_stmt;
  enum vect_def_type dt[3]
    = {vect_unknown_def_type, vect_unknown_def_type, vect_unknown_def_type};
  gimple *new_stmt = NULL;
  stmt_vec_info prev_stmt_info;
  int nunits_in;
  int nunits_out;
  tree vectype_out;
  int ncopies;
  int j, i;
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  tree vop0, vop1, vop2;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  int vf;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is STMT a vectorizable binary/unary operation?   */
  if (!is_gimple_assign (stmt))
    return false;

  if (TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
    return false;

  code = gimple_assign_rhs_code (stmt);

  /* For pointer addition, we should use the normal plus for
     the vector addition.  */
  if (code == POINTER_PLUS_EXPR)
    code = PLUS_EXPR;

  /* Support only unary or binary operations.  */
  op_type = TREE_CODE_LENGTH (code);
  if (op_type != unary_op && op_type != binary_op && op_type != ternary_op)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "num. args = %d (not unary/binary/ternary op).\n",
                         op_type);
      return false;
    }

  scalar_dest = gimple_assign_lhs (stmt);
  vectype_out = STMT_VINFO_VECTYPE (stmt_info);

  /* Most operations cannot handle bit-precision types without extra
     truncations.  */
  if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
      && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
	  != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
      /* Exception are bitwise binary operations.  */
      && code != BIT_IOR_EXPR
      && code != BIT_XOR_EXPR
      && code != BIT_AND_EXPR)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "bit-precision arithmetic not supported.\n");
      return false;
    }

  op0 = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op0, vinfo, &def_stmt, &dt[0], &vectype))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }
  /* If op0 is an external or constant def use a vector type with
     the same size as the output vector type.  */
  if (!vectype)
    {
      /* For boolean type we cannot determine vectype by
	 invariant value (don't know whether it is a vector
	 of booleans or vector of integers).  We use output
	 vectype because operations on boolean don't change
	 type.  */
      if (TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE)
	{
	  if (TREE_CODE (TREE_TYPE (scalar_dest)) != BOOLEAN_TYPE)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "not supported operation on bool value.\n");
	      return false;
	    }
	  vectype = vectype_out;
	}
      else
	vectype = get_same_sized_vectype (TREE_TYPE (op0), vectype_out);
    }
  if (vec_stmt)
    gcc_assert (vectype);
  if (!vectype)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "no vectype for scalar type ");
          dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                             TREE_TYPE (op0));
          dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
        }

      return false;
    }

  nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
  nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
  if (nunits_out != nunits_in)
    return false;

  if (op_type == binary_op || op_type == ternary_op)
    {
      op1 = gimple_assign_rhs2 (stmt);
      if (!vect_is_simple_use (op1, vinfo, &def_stmt, &dt[1]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
	  return false;
	}
    }
  if (op_type == ternary_op)
    {
      op2 = gimple_assign_rhs3 (stmt);
      if (!vect_is_simple_use (op2, vinfo, &def_stmt, &dt[2]))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "use not simple.\n");
	  return false;
	}
    }

  if (loop_vinfo)
    vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  else
    vf = 1;

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits_in;

  gcc_assert (ncopies >= 1);

  /* Shifts are handled in vectorizable_shift ().  */
  if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR
      || code == RROTATE_EXPR)
    return false;

  /* Supportable by target?  */

  vec_mode = TYPE_MODE (vectype);
  if (code == MULT_HIGHPART_EXPR)
    target_support_p = can_mult_highpart_p (vec_mode, TYPE_UNSIGNED (vectype));
  else
    {
      optab = optab_for_tree_code (code, vectype, optab_default);
      if (!optab)
	{
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "no optab.\n");
	  return false;
	}
      target_support_p = (optab_handler (optab, vec_mode)
			  != CODE_FOR_nothing);
    }

  if (!target_support_p)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "op not supported by target.\n");
      /* Check only during analysis.  */
      if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
	  || (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
        return false;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
                         "proceeding using word mode.\n");
    }

  /* Worthwhile without SIMD support?  Check only during analysis.  */
  if (!VECTOR_MODE_P (vec_mode)
      && !vec_stmt
      && vf < vect_min_worthwhile_factor (code))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not worthwhile without SIMD support.\n");
      return false;
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "=== vectorizable_operation ===\n");
      vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
      return true;
    }

  /* Transform.  */

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform binary/unary operation.\n");

  /* Handle def.  */
  vec_dest = vect_create_destination_var (scalar_dest, vectype);
  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e.,
     we need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1 (done in vectorizable_load. See more details
             there):
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     step2: vectorize stmt S2 (done here):
        To vectorize stmt S2 we first need to find the relevant vector
        def for the first operand 'x'.  This is, as usual, obtained from
        the vector stmt recorded in the STMT_VINFO_VEC_STMT of the stmt
        that defines 'x' (S1).  This way we find the stmt VS1_0, and the
        relevant vector def 'vx0'.  Having found 'vx0' we can generate
        the vector stmt VS2_0, and as usual, record it in the
        STMT_VINFO_VEC_STMT of stmt S2.
        When creating the second copy (VS2_1), we obtain the relevant vector
        def from the vector stmt recorded in the STMT_VINFO_RELATED_STMT of
        stmt VS1_0.  This way we find the stmt VS1_1 and the relevant
        vector def 'vx1'.  Using 'vx1' we create stmt VS2_1 and record a
        pointer to it in the STMT_VINFO_RELATED_STMT of the vector stmt VS2_0.
        Similarly when creating stmts VS2_2 and VS2_3.  This is the resulting
        chain of stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        VS2_0:  vz0 = vx0 + v1  VS2_1           -
        VS2_1:  vz1 = vx1 + v1  VS2_2           -
        VS2_2:  vz2 = vx2 + v1  VS2_3           -
        VS2_3:  vz3 = vx3 + v1  -               -
        S2:     z = x + 1       -               VS2_0  */
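  /* Given that chaining discipline, a consumer can recover the vector
     def used by copy number J of a scalar stmt roughly as follows (a
     sketch of what vect_get_vec_def_for_stmt_copy amounts to; the real
     code advances one RELATED_STMT link per copy):

       gimple *vs = STMT_VINFO_VEC_STMT (vinfo_for_stmt (scalar_stmt));
       for (int k = 0; k < j; k++)
	 vs = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (vs));

     so every copy created below must keep RELATED_STMT well formed.  */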
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* Handle uses.  */
      if (j == 0)
	{
	  if (op_type == binary_op || op_type == ternary_op)
	    vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
			       slp_node, -1);
	  else
	    vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
			       slp_node, -1);
	  if (op_type == ternary_op)
	    {
	      vec_oprnds2.create (1);
	      vec_oprnds2.quick_push (vect_get_vec_def_for_operand (op2,
								    stmt));
	    }
	}
      else
	{
	  vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, &vec_oprnds1);
	  if (op_type == ternary_op)
	    {
	      tree vec_oprnd = vec_oprnds2.pop ();
	      vec_oprnds2.quick_push (vect_get_vec_def_for_stmt_copy (dt[2],
								      vec_oprnd));
	    }
	}

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vop0)
        {
	  vop1 = ((op_type == binary_op || op_type == ternary_op)
		  ? vec_oprnds1[i] : NULL_TREE);
	  vop2 = ((op_type == ternary_op)
		  ? vec_oprnds2[i] : NULL_TREE);
	  new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1, vop2);
	  new_temp = make_ssa_name (vec_dest, new_stmt);
	  gimple_assign_set_lhs (new_stmt, new_temp);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
	    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
	STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
	STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();

  return true;
}
/* A helper function to ensure data reference DR's base alignment
   for STMT_INFO.  */

static void
ensure_base_align (stmt_vec_info stmt_info, struct data_reference *dr)
{
  if (!dr->aux)
    return;

  if (DR_VECT_AUX (dr)->base_misaligned)
    {
      tree vectype = STMT_VINFO_VECTYPE (stmt_info);
      tree base_decl = DR_VECT_AUX (dr)->base_decl;

      if (decl_in_symtab_p (base_decl))
	symtab_node::get (base_decl)->increase_alignment (TYPE_ALIGN (vectype));
      else
	{
	  DECL_ALIGN (base_decl) = TYPE_ALIGN (vectype);
	  DECL_USER_ALIGN (base_decl) = 1;
	}
      DR_VECT_AUX (dr)->base_misaligned = false;
    }
}
/* Given a vector type VECTYPE returns the VECTOR_CST mask that implements
   reversal of the vector elements.  If that is impossible to do,
   returns NULL.  */

static tree
perm_mask_for_reverse (tree vectype)
{
  int i, nunits;
  unsigned char *sel;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);
  sel = XALLOCAVEC (unsigned char, nunits);

  for (i = 0; i < nunits; ++i)
    sel[i] = nunits - 1 - i;

  if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
    return NULL_TREE;
  return vect_gen_perm_mask_checked (vectype, sel);
}
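/* For example, for V4SI perm_mask_for_reverse builds the selector
   {3, 2, 1, 0}, so that

     vect_r = VEC_PERM_EXPR <v, v, {3, 2, 1, 0}>;

   yields the element-reversed vector; NULL_TREE is returned when
   can_vec_perm_p reports that the target cannot do this permutation.  */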
/* Function vectorizable_store.

   Check if STMT defines a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
static bool
vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
                    slp_tree slp_node)
{
  tree scalar_dest;
  tree data_ref;
  tree op;
  tree vec_oprnd = NULL_TREE;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info), *first_dr = NULL;
  tree elem_type;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *loop = NULL;
  machine_mode vec_mode;
  tree dummy;
  enum dr_alignment_support alignment_support_scheme;
  gimple *def_stmt;
  enum vect_def_type dt;
  stmt_vec_info prev_stmt_info = NULL;
  tree dataref_ptr = NULL_TREE;
  tree dataref_offset = NULL_TREE;
  gimple *ptr_incr = NULL;
  int ncopies;
  int j;
  gimple *next_stmt, *first_stmt = NULL;
  bool grouped_store = false;
  bool store_lanes_p = false;
  unsigned int group_size, i;
  vec<tree> dr_chain = vNULL;
  vec<tree> oprnds = vNULL;
  vec<tree> result_chain = vNULL;
  bool inv_p;
  bool negative = false;
  tree offset = NULL_TREE;
  vec<tree> vec_oprnds = vNULL;
  bool slp = (slp_node != NULL);
  unsigned int vec_num;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec_info *vinfo = stmt_info->vinfo;
  tree aggr_type;
  gimple *new_stmt;
  tree scatter_base = NULL_TREE, scatter_off = NULL_TREE;
  tree scatter_off_vectype = NULL_TREE, scatter_decl = NULL_TREE;
  int scatter_scale = 1;
  enum vect_def_type scatter_idx_dt = vect_unknown_def_type;
  enum vect_def_type scatter_src_dt = vect_unknown_def_type;

  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def)
    return false;

  /* Is vectorizable store? */

  if (!is_gimple_assign (stmt))
    return false;

  scalar_dest = gimple_assign_lhs (stmt);
  if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
      && is_pattern_stmt_p (stmt_info))
    scalar_dest = TREE_OPERAND (scalar_dest, 0);
  if (TREE_CODE (scalar_dest) != ARRAY_REF
      && TREE_CODE (scalar_dest) != BIT_FIELD_REF
      && TREE_CODE (scalar_dest) != INDIRECT_REF
      && TREE_CODE (scalar_dest) != COMPONENT_REF
      && TREE_CODE (scalar_dest) != IMAGPART_EXPR
      && TREE_CODE (scalar_dest) != REALPART_EXPR
      && TREE_CODE (scalar_dest) != MEM_REF)
    return false;

  gcc_assert (gimple_assign_single_p (stmt));

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (loop_vinfo)
    loop = LOOP_VINFO_LOOP (loop_vinfo);

  /* Multiple types in SLP are handled by creating the appropriate number of
     vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
     case of SLP.  */
  if (slp || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "multiple types in nested loop.\n");
      return false;
    }

  op = gimple_assign_rhs1 (stmt);
  if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "use not simple.\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  vec_mode = TYPE_MODE (vectype);

  /* FORNOW. In some cases can vectorize even if data-type not supported
     (e.g. - array initialization with 0).  */
  if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
    return false;

  if (!STMT_VINFO_DATA_REF (stmt_info))
    return false;

  if (!STMT_VINFO_STRIDED_P (stmt_info))
    {
      negative =
	tree_int_cst_compare (loop && nested_in_vect_loop_p (loop, stmt)
			      ? STMT_VINFO_DR_STEP (stmt_info) : DR_STEP (dr),
			      size_zero_node) < 0;
      if (negative && ncopies > 1)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "multiple types with negative step.\n");
	  return false;
	}
      if (negative)
	{
	  gcc_assert (!grouped_store);
	  alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
	  if (alignment_support_scheme != dr_aligned
	      && alignment_support_scheme != dr_unaligned_supported)
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step but alignment required.\n");
	      return false;
	    }
	  if (dt != vect_constant_def
	      && dt != vect_external_def
	      && !perm_mask_for_reverse (vectype))
	    {
	      if (dump_enabled_p ())
		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
				 "negative step and reversing not supported.\n");
	      return false;
	    }
	}
    }

  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_store = true;
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
	  && !PURE_SLP_STMT (stmt_info)
	  && !STMT_VINFO_STRIDED_P (stmt_info))
	{
	  if (vect_store_lanes_supported (vectype, group_size))
	    store_lanes_p = true;
	  else if (!vect_grouped_store_supported (vectype, group_size))
	    return false;
	}

      if (STMT_VINFO_STRIDED_P (stmt_info)
	  && (slp || PURE_SLP_STMT (stmt_info))
	  && (group_size > nunits
	      || nunits % group_size != 0))
	{
	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			   "unhandled strided group store\n");
	  return false;
	}

      if (first_stmt == stmt)
	{
          /* STMT is the leader of the group. Check the operands of all the
             stmts of the group.  */
          next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
          while (next_stmt)
            {
	      gcc_assert (gimple_assign_single_p (next_stmt));
	      op = gimple_assign_rhs1 (next_stmt);
              if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt))
		{
                  if (dump_enabled_p ())
                    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                     "use not simple.\n");
                  return false;
		}
              next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
            }
	}
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      scatter_decl = vect_check_gather_scatter (stmt, loop_vinfo, &scatter_base,
						&scatter_off, &scatter_scale);
      gcc_assert (scatter_decl);
      if (!vect_is_simple_use (scatter_off, vinfo, &def_stmt, &scatter_idx_dt,
			       &scatter_off_vectype))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "scatter index use not simple.");
	  return false;
	}
    }
  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
	vect_model_store_cost (stmt_info, ncopies, store_lanes_p, dt,
			       NULL, NULL, NULL);
      return true;
    }

  /* Transform.  */

  ensure_base_align (stmt_info, dr);

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, vec_oprnd1 = NULL_TREE, op, src;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (scatter_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, perm_mask = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int scatter_off_nunits = TYPE_VECTOR_SUBPARTS (scatter_off_vectype);

      if (nunits == (unsigned int) scatter_off_nunits)
	modifier = NONE;
      else if (nunits == (unsigned int) scatter_off_nunits / 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, scatter_off_nunits);
	  modifier = WIDEN;

	  for (i = 0; i < (unsigned int) scatter_off_nunits; ++i)
	    sel[i] = i | nunits;

	  perm_mask = vect_gen_perm_mask_checked (scatter_off_vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	}
      else if (nunits == (unsigned int) scatter_off_nunits * 2)
	{
	  unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
	  modifier = NARROW;

	  for (i = 0; i < (unsigned int) nunits; ++i)
	    sel[i] = i | scatter_off_nunits;

	  perm_mask = vect_gen_perm_mask_checked (vectype, sel);
	  gcc_assert (perm_mask != NULL_TREE);
	  ncopies *= 2;
	}
      else
	gcc_unreachable ();

      rettype = TREE_TYPE (TREE_TYPE (scatter_decl));
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);

      gcc_checking_assert (TREE_CODE (masktype) == INTEGER_TYPE
			   && TREE_CODE (rettype) == VOID_TYPE);

      ptr = fold_convert (ptrtype, scatter_base);
      if (!is_gimple_min_invariant (ptr))
	{
	  ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
	  new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
	  gcc_assert (!new_bb);
	}

      /* Currently we support only unconditional scatter stores,
	 so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
      mask = vect_init_vector (stmt, mask, masktype, NULL);

      scale = build_int_cst (scaletype, scatter_scale);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
	{
	  if (j == 0)
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_operand (gimple_assign_rhs1 (stmt), stmt);
	      op = vec_oprnd0
		= vect_get_vec_def_for_operand (scatter_off, stmt);
	    }
	  else if (modifier != NONE && (j & 1))
	    {
	      if (modifier == WIDEN)
		{
		  src = vec_oprnd1
		    = vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
		  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
					     stmt, gsi);
		}
	      else if (modifier == NARROW)
		{
		  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
					      stmt, gsi);
		  op = vec_oprnd0
		    = vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
		}
	      else
		gcc_unreachable ();
	    }
	  else
	    {
	      src = vec_oprnd1
		= vect_get_vec_def_for_stmt_copy (scatter_src_dt, vec_oprnd1);
	      op = vec_oprnd0
		= vect_get_vec_def_for_stmt_copy (scatter_idx_dt, vec_oprnd0);
	    }

	  if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
			  == TYPE_VECTOR_SUBPARTS (srctype));
	      var = vect_get_new_ssa_name (srctype, vect_simple_var);
	      src = build1 (VIEW_CONVERT_EXPR, srctype, src);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      src = var;
	    }

	  if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
	    {
	      gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
			  == TYPE_VECTOR_SUBPARTS (idxtype));
	      var = vect_get_new_ssa_name (idxtype, vect_simple_var);
	      op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
	      new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);
	      op = var;
	    }

	  new_stmt
	    = gimple_build_call (scatter_decl, 5, ptr, mask, op, src, scale);

	  vect_finish_stmt_generation (stmt, new_stmt, gsi);

	  if (prev_stmt_info == NULL)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
      return true;
    }
  if (grouped_store)
    {
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))++;

      /* FORNOW */
      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));

      /* We vectorize all the stmts of the interleaving group when we
	 reach the last stmt in the group.  */
      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
	  && !slp)
	{
	  *vec_stmt = NULL;
	  return true;
	}

      if (slp)
        {
          grouped_store = false;
          /* VEC_NUM is the number of vect stmts to be created for this
             group.  */
          vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
	  gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
          first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
	  op = gimple_assign_rhs1 (first_stmt);
        }
      else
        /* VEC_NUM is the number of vect stmts to be created for this
           group.  */
	vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform store. ncopies = %d\n", ncopies);
  if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;
      tree vec_oprnd;
      unsigned int g;

      gcc_assert (!nested_in_vect_loop_p (loop, stmt));

      stride_base
	= fold_build_pointer_plus
	    (unshare_expr (DR_BASE_ADDRESS (first_dr)),
	     size_binop (PLUS_EXPR,
			 convert_to_ptrofftype (unshare_expr (DR_OFFSET (first_dr))),
			 convert_to_ptrofftype (DR_INIT(first_dr))));
      stride_step = fold_convert (sizetype, unshare_expr (DR_STEP (first_dr)));

      /* For a store with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

	   for (i = 0; i < n; i += stride)
	     array[i] = ...;

	 we generate a new induction variable and new stores from
	 the components of the (vectorized) rhs:

	   for (j = 0; ; j += VF*stride)
	     vectemp = ...;
	     tmp1 = vectemp[0];
	     array[j] = tmp1;
	     tmp2 = vectemp[1];
	     array[j + stride] = tmp2;
	     ...
	 */
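      /* For example, with stride = 3 and VF = 4 (so four lanes are
	 stored per copy), the code below stores the lanes of one vector
	 at array[j], array[j + 3], array[j + 6] and array[j + 9], and
	 the induction variable created below then advances j by
	 VF * stride = 12 for the next vector iteration.  */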
      unsigned nstores = nunits;
      tree ltype = elem_type;
      if (slp)
	{
	  nstores = nunits / group_size;
	  if (group_size < nunits)
	    ltype = build_vector_type (elem_type, group_size);
	  else
	    ltype = vectype;
	  ltype = build_aligned_type (ltype, TYPE_ALIGN (elem_type));
	  ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
	  group_size = 1;
	}

      ivstep = stride_step;
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (ivstep), ivstep,
			    build_int_cst (TREE_TYPE (ivstep),
					   ncopies * nstores));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (stride_base, ivstep, NULL,
		 loop, &incr_gsi, insert_after,
		 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (stride_step, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      next_stmt = first_stmt;
      for (g = 0; g < group_size; g++)
	{
	  running_off = offvar;
	  if (g)
	    {
	      tree size = TYPE_SIZE_UNIT (ltype);
	      tree pos = fold_build2 (MULT_EXPR, sizetype, size_int (g),
				      size);
	      tree newoff = copy_ssa_name (running_off, NULL);
	      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					  running_off, pos);
	      vect_finish_stmt_generation (stmt, incr, gsi);
	      running_off = newoff;
	    }
	  for (j = 0; j < ncopies; j++)
	    {
	      /* We've set op and dt above, from gimple_assign_rhs1(stmt),
		 and first_stmt == stmt.  */
	      if (j == 0)
		{
		  if (slp)
		    {
		      vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
					 slp_node, -1);
		      vec_oprnd = vec_oprnds[0];
		    }
		  else
		    {
		      gcc_assert (gimple_assign_single_p (next_stmt));
		      op = gimple_assign_rhs1 (next_stmt);
		      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		    }
		}
	      else
		{
		  if (slp)
		    vec_oprnd = vec_oprnds[j];
		  else
		    {
		      vect_is_simple_use (vec_oprnd, vinfo, &def_stmt, &dt);
		      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, vec_oprnd);
		    }
		}

	      for (i = 0; i < nstores; i++)
		{
		  tree newref, newoff;
		  gimple *incr, *assign;
		  tree size = TYPE_SIZE (ltype);
		  /* Extract the i'th component.  */
		  tree pos = fold_build2 (MULT_EXPR, bitsizetype,
					  bitsize_int (i), size);
		  tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
					   size, pos);

		  elem = force_gimple_operand_gsi (gsi, elem, true,
						   NULL_TREE, true,
						   GSI_SAME_STMT);

		  newref = build2 (MEM_REF, ltype,
				   running_off, alias_off);

		  /* And store it to *running_off.  */
		  assign = gimple_build_assign (newref, elem);
		  vect_finish_stmt_generation (stmt, assign, gsi);

		  newoff = copy_ssa_name (running_off, NULL);
		  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
					      running_off, stride_step);
		  vect_finish_stmt_generation (stmt, incr, gsi);

		  running_off = newoff;
		  if (g == group_size - 1
		      && !slp)
		    {
		      if (j == 0 && i == 0)
			STMT_VINFO_VEC_STMT (stmt_info)
			    = *vec_stmt = assign;
		      else
			STMT_VINFO_RELATED_STMT (prev_stmt_info) = assign;
		      prev_stmt_info = vinfo_for_stmt (assign);
		    }
		}
	    }
	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	}
      return true;
    }
  dr_chain.create (group_size);
  oprnds.create (group_size);

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with store-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!store_lanes_p
	      || alignment_support_scheme == dr_aligned
	      || alignment_support_scheme == dr_unaligned_supported);

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (store_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  For more details see documentation in
     vect_get_vec_def_for_copy_stmt.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  &base + 2 = x2
        S2:  &base = x0
        S3:  &base + 1 = x1
        S4:  &base + 3 = x3

     We create vectorized stores starting from base address (the access of the
     first stmt in the chain (S2 in the above example), when the last store stmt
     of the chain (S4) is reached:

        VS1: &base = vx2
	VS2: &base + vec_size*1 = vx0
	VS3: &base + vec_size*2 = vx1
	VS4: &base + vec_size*3 = vx3

     Then permutation statements are generated:

	VS5: vx5 = VEC_PERM_EXPR < vx0, vx3, {0, 8, 1, 9, 2, 10, 3, 11} >
	VS6: vx6 = VEC_PERM_EXPR < vx0, vx3, {4, 12, 5, 13, 6, 14, 7, 15} >
	...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_store_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_store_chain()).

     In case of both multiple types and interleaving, above vector stores and
     permutation stmts are created for every copy.  The result vector stmts are
     put in STMT_VINFO_VEC_STMT for the first copy and in the corresponding
     STMT_VINFO_RELATED_STMT for the next copies.
  */
  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {

      if (j == 0)
	{
          if (slp)
            {
	      /* Get vectorized arguments for SLP_NODE.  */
              vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
                                 NULL, slp_node, -1);

              vec_oprnd = vec_oprnds[0];
            }
          else
            {
	      /* For interleaved stores we collect vectorized defs for all the
		 stores in the group in DR_CHAIN and OPRNDS. DR_CHAIN is then
		 used as an input to vect_permute_store_chain(), and OPRNDS as
		 an input to vect_get_vec_def_for_stmt_copy() for the next copy.

		 If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
		 OPRNDS are of size 1.  */
	      next_stmt = first_stmt;
	      for (i = 0; i < group_size; i++)
		{
		  /* Since gaps are not supported for interleaved stores,
		     GROUP_SIZE is the exact number of stmts in the chain.
		     Therefore, NEXT_STMT can't be NULL_TREE.  In case that
		     there is no interleaving, GROUP_SIZE is 1, and only one
		     iteration of the loop will be executed.  */
		  gcc_assert (next_stmt
			      && gimple_assign_single_p (next_stmt));
		  op = gimple_assign_rhs1 (next_stmt);

		  vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
		  dr_chain.quick_push (vec_oprnd);
		  oprnds.quick_push (vec_oprnd);
		  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
		}
	    }

	  /* We should have caught mismatched types earlier.  */
	  gcc_assert (useless_type_conversion_p (vectype,
						 TREE_TYPE (vec_oprnd)));
	  bool simd_lane_access_p
	    = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
	  if (simd_lane_access_p
	      && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
	      && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
	      && integer_zerop (DR_OFFSET (first_dr))
	      && integer_zerop (DR_INIT (first_dr))
	      && alias_sets_conflict_p (get_alias_set (aggr_type),
					get_alias_set (DR_REF (first_dr))))
	    {
	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
	      dataref_offset = build_int_cst (reference_alias_ptr_type
					      (DR_REF (first_dr)), 0);
	      inv_p = false;
	    }
	  else
	    dataref_ptr
	      = vect_create_data_ref_ptr (first_stmt, aggr_type,
					  simd_lane_access_p ? loop : NULL,
					  offset, &dummy, gsi, &ptr_incr,
					  simd_lane_access_p, &inv_p);
	  gcc_assert (bb_vinfo || !inv_p);
	}
      else
	{
	  /* For interleaved stores we created vectorized defs for all the
	     defs stored in OPRNDS in the previous iteration (previous copy).
	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
	     next copy.
	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
	     OPRNDS are of size 1.  */
	  for (i = 0; i < group_size; i++)
	    {
	      op = oprnds[i];
	      vect_is_simple_use (op, vinfo, &def_stmt, &dt);
	      vec_oprnd = vect_get_vec_def_for_stmt_copy (dt, op);
	      dr_chain[i] = vec_oprnd;
	      oprnds[i] = vec_oprnd;
	    }
	  if (dataref_offset)
	    dataref_offset
	      = int_const_binop (PLUS_EXPR, dataref_offset,
				 TYPE_SIZE_UNIT (aggr_type));
	  else
	    dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
					   TYPE_SIZE_UNIT (aggr_type));
	}

      if (store_lanes_p)
	{
	  tree vec_array;

	  /* Combine all the vectors into an array.  */
	  vec_array = create_vector_array (vectype, vec_num);
	  for (i = 0; i < vec_num; i++)
	    {
	      vec_oprnd = dr_chain[i];
	      write_vector_array (stmt, gsi, vec_oprnd, vec_array, i);
	    }

	  /* Emit:
	       MEM_REF[...all elements...] = STORE_LANES (VEC_ARRAY).  */
	  data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
	  new_stmt = gimple_build_call_internal (IFN_STORE_LANES, 1, vec_array);
	  gimple_call_set_lhs (new_stmt, data_ref);
	  vect_finish_stmt_generation (stmt, new_stmt, gsi);
	}
      else
	{
	  new_stmt = NULL;
	  if (grouped_store)
	    {
	      if (j == 0)
		result_chain.create (group_size);
	      /* Permute.  */
	      vect_permute_store_chain (dr_chain, group_size, stmt, gsi,
					&result_chain);
	    }

	  next_stmt = first_stmt;
	  for (i = 0; i < vec_num; i++)
	    {
	      unsigned align, misalign;

	      if (i > 0)
		/* Bump the vector pointer.  */
		dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
					       stmt, NULL_TREE);

	      if (slp)
		vec_oprnd = vec_oprnds[i];
	      else if (grouped_store)
		/* For grouped stores vectorized defs are interleaved in
		   vect_permute_store_chain().  */
		vec_oprnd = result_chain[i];

	      data_ref = fold_build2 (MEM_REF, TREE_TYPE (vec_oprnd),
				      dataref_ptr,
				      dataref_offset
				      ? dataref_offset
				      : build_int_cst (reference_alias_ptr_type
						       (DR_REF (first_dr)), 0));
	      align = TYPE_ALIGN_UNIT (vectype);
	      if (aligned_access_p (first_dr))
		misalign = 0;
	      else if (DR_MISALIGNMENT (first_dr) == -1)
		{
		  if (DR_VECT_AUX (first_dr)->base_element_aligned)
		    align = TYPE_ALIGN_UNIT (elem_type);
		  else
		    align = get_object_alignment (DR_REF (first_dr))
		      / BITS_PER_UNIT;
		  misalign = 0;
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  align * BITS_PER_UNIT);
		}
	      else
		{
		  TREE_TYPE (data_ref)
		    = build_aligned_type (TREE_TYPE (data_ref),
					  TYPE_ALIGN (elem_type));
		  misalign = DR_MISALIGNMENT (first_dr);
		}
	      if (dataref_offset == NULL_TREE
		  && TREE_CODE (dataref_ptr) == SSA_NAME)
		set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
					misalign);

	      if (negative
		  && dt != vect_constant_def
		  && dt != vect_external_def)
		{
		  tree perm_mask = perm_mask_for_reverse (vectype);
		  tree perm_dest
		    = vect_create_destination_var (gimple_assign_rhs1 (stmt),
						   vectype);
		  tree new_temp = make_ssa_name (perm_dest);

		  /* Generate the permute statement.  */
		  gimple *perm_stmt
		    = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
					   vec_oprnd, perm_mask);
		  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

		  perm_stmt = SSA_NAME_DEF_STMT (new_temp);
		  vec_oprnd = new_temp;
		}

	      /* Arguments are ready.  Create the new vector stmt.  */
	      new_stmt = gimple_build_assign (data_ref, vec_oprnd);
	      vect_finish_stmt_generation (stmt, new_stmt, gsi);

	      if (slp)
		continue;

	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      if (!next_stmt)
		break;
	    }
	}
      if (!slp)
	{
	  if (j == 0)
	    STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
	  else
	    STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
	  prev_stmt_info = vinfo_for_stmt (new_stmt);
	}
    }

  dr_chain.release ();
  oprnds.release ();
  result_chain.release ();
  vec_oprnds.release ();

  return true;
}
/* Given a vector type VECTYPE, turns permutation SEL into the equivalent
   VECTOR_CST mask.  No checks are made that the target platform supports the
   mask, so callers may wish to test can_vec_perm_p separately, or use
   vect_gen_perm_mask_checked.  */
tree
vect_gen_perm_mask_any (tree vectype, const unsigned char *sel)
{
  tree mask_elt_type, mask_type, mask_vec, *mask_elts;
  int i, nunits;

  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  mask_elt_type = lang_hooks.types.type_for_mode
    (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
  mask_type = get_vectype_for_scalar_type (mask_elt_type);

  mask_elts = XALLOCAVEC (tree, nunits);
  for (i = nunits - 1; i >= 0; i--)
    mask_elts[i] = build_int_cst (mask_elt_type, sel[i]);
  mask_vec = build_vector (mask_type, mask_elts);

  return mask_vec;
}
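/* For example, building a reversal mask for a V4SI vector:

     unsigned char sel[4] = { 3, 2, 1, 0 };
     tree mask = vect_gen_perm_mask_any (v4si_type, sel);

   produces the VECTOR_CST {3, 2, 1, 0} with a same-width integer
   element type chosen via int_mode_for_mode, suitable as the third
   operand of a VEC_PERM_EXPR.  */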
/* Checked version of vect_gen_perm_mask_any.  Asserts can_vec_perm_p,
   i.e. that the target supports the pattern _for arbitrary input vectors_.  */

tree
vect_gen_perm_mask_checked (tree vectype, const unsigned char *sel)
{
  gcc_assert (can_vec_perm_p (TYPE_MODE (vectype), false, sel));
  return vect_gen_perm_mask_any (vectype, sel);
}
/* Given a vector variable X and Y, that was generated for the scalar
   STMT, generate instructions to permute the vector elements of X and Y
   using permutation mask MASK_VEC, insert them at *GSI and return the
   permuted vector variable.  */
static tree
permute_vec_elements (tree x, tree y, tree mask_vec, gimple *stmt,
		      gimple_stmt_iterator *gsi)
{
  tree vectype = TREE_TYPE (x);
  tree perm_dest, data_ref;
  gimple *perm_stmt;

  perm_dest = vect_create_destination_var (gimple_get_lhs (stmt), vectype);
  data_ref = make_ssa_name (perm_dest);

  /* Generate the permute statement.  */
  perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, x, y, mask_vec);
  vect_finish_stmt_generation (stmt, perm_stmt, gsi);

  return data_ref;
}
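/* For example, interleaving the low halves of two V4SI vectors X and Y:

     unsigned char sel[4] = { 0, 4, 1, 5 };
     tree mask = vect_gen_perm_mask_checked (vectype, sel);
     tree lo = permute_vec_elements (x, y, mask, stmt, gsi);

   emits 'lo_N = VEC_PERM_EXPR <x, y, {0, 4, 1, 5}>;' at *GSI and
   returns the new SSA name.  */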
/* Hoist the definitions of all SSA uses on STMT out of the loop LOOP,
   inserting them on the loops preheader edge.  Returns true if we
   were successful in doing so (and thus STMT can be moved then),
   otherwise returns false.  */
static bool
hoist_defs_of_uses (gimple *stmt, struct loop *loop)
{
  ssa_op_iter i;
  tree op;
  bool any = false;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  /* Make sure we don't need to recurse.  While we could do
	     so in simple cases when there are more complex use webs
	     we don't have an easy way to preserve stmt order to fulfil
	     dependencies within them.  */
	  tree op2;
	  ssa_op_iter i2;
	  if (gimple_code (def_stmt) == GIMPLE_PHI)
	    return false;
	  FOR_EACH_SSA_TREE_OPERAND (op2, def_stmt, i2, SSA_OP_USE)
	    {
	      gimple *def_stmt2 = SSA_NAME_DEF_STMT (op2);
	      if (!gimple_nop_p (def_stmt2)
		  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt2)))
		return false;
	    }
	  any = true;
	}
    }

  if (!any)
    return true;

  FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_USE)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op);
      if (!gimple_nop_p (def_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
	  gsi_remove (&gsi, false);
	  gsi_insert_on_edge_immediate (loop_preheader_edge (loop), def_stmt);
	}
    }

  return true;
}
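/* For example, for an invariant load whose address computation is still
   inside the loop:

     loop:
       p_1 = &a + off_2;    (definition of the only SSA use of S1)
       S1: x_3 = *p_1;

   hoist_defs_of_uses moves 'p_1 = &a + off_2' to the preheader edge so
   that S1 itself can later be hoisted as an invariant load; a PHI
   definition, or one whose own operands have further in-loop
   dependencies, makes it return false instead.  */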
/* vectorizable_load.

   Check if STMT reads a non scalar data-ref (array/pointer/structure) that
   can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt to replace it, put it in VEC_STMT, and insert it at BSI.
   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */
6100 vectorizable_load (gimple
*stmt
, gimple_stmt_iterator
*gsi
, gimple
**vec_stmt
,
6101 slp_tree slp_node
, slp_instance slp_node_instance
)
6104 tree vec_dest
= NULL
;
6105 tree data_ref
= NULL
;
6106 stmt_vec_info stmt_info
= vinfo_for_stmt (stmt
);
6107 stmt_vec_info prev_stmt_info
;
6108 loop_vec_info loop_vinfo
= STMT_VINFO_LOOP_VINFO (stmt_info
);
6109 struct loop
*loop
= NULL
;
6110 struct loop
*containing_loop
= (gimple_bb (stmt
))->loop_father
;
6111 bool nested_in_vect_loop
= false;
6112 struct data_reference
*dr
= STMT_VINFO_DATA_REF (stmt_info
), *first_dr
= NULL
;
6116 gimple
*new_stmt
= NULL
;
6118 enum dr_alignment_support alignment_support_scheme
;
6119 tree dataref_ptr
= NULL_TREE
;
6120 tree dataref_offset
= NULL_TREE
;
6121 gimple
*ptr_incr
= NULL
;
6123 int i
, j
, group_size
= -1, group_gap_adj
;
6124 tree msq
= NULL_TREE
, lsq
;
6125 tree offset
= NULL_TREE
;
6126 tree byte_offset
= NULL_TREE
;
6127 tree realignment_token
= NULL_TREE
;
6129 vec
<tree
> dr_chain
= vNULL
;
6130 bool grouped_load
= false;
6131 bool load_lanes_p
= false;
6134 bool negative
= false;
6135 bool compute_in_loop
= false;
6136 struct loop
*at_loop
;
6138 bool slp
= (slp_node
!= NULL
);
6139 bool slp_perm
= false;
6140 enum tree_code code
;
6141 bb_vec_info bb_vinfo
= STMT_VINFO_BB_VINFO (stmt_info
);
6144 tree gather_base
= NULL_TREE
, gather_off
= NULL_TREE
;
6145 tree gather_off_vectype
= NULL_TREE
, gather_decl
= NULL_TREE
;
6146 int gather_scale
= 1;
6147 enum vect_def_type gather_dt
= vect_unknown_def_type
;
6148 vec_info
*vinfo
= stmt_info
->vinfo
;
6150 if (!STMT_VINFO_RELEVANT_P (stmt_info
) && !bb_vinfo
)
6153 if (STMT_VINFO_DEF_TYPE (stmt_info
) != vect_internal_def
)
6156 /* Is vectorizable load? */
6157 if (!is_gimple_assign (stmt
))
6160 scalar_dest
= gimple_assign_lhs (stmt
);
6161 if (TREE_CODE (scalar_dest
) != SSA_NAME
)
6164 code
= gimple_assign_rhs_code (stmt
);
6165 if (code
!= ARRAY_REF
6166 && code
!= BIT_FIELD_REF
6167 && code
!= INDIRECT_REF
6168 && code
!= COMPONENT_REF
6169 && code
!= IMAGPART_EXPR
6170 && code
!= REALPART_EXPR
6172 && TREE_CODE_CLASS (code
) != tcc_declaration
)
6175 if (!STMT_VINFO_DATA_REF (stmt_info
))
6178 tree vectype
= STMT_VINFO_VECTYPE (stmt_info
);
6179 int nunits
= TYPE_VECTOR_SUBPARTS (vectype
);
6183 loop
= LOOP_VINFO_LOOP (loop_vinfo
);
6184 nested_in_vect_loop
= nested_in_vect_loop_p (loop
, stmt
);
6185 vf
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
);
6190 /* Multiple types in SLP are handled by creating the appropriate number of
6191 vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
6193 if (slp
|| PURE_SLP_STMT (stmt_info
))
6196 ncopies
= LOOP_VINFO_VECT_FACTOR (loop_vinfo
) / nunits
;
6198 gcc_assert (ncopies
>= 1);
  /* FORNOW.  This restriction should be relaxed.  */
  if (nested_in_vect_loop && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "multiple types in nested loop.\n");
      return false;
    }

  /* Invalidate assumptions made by dependence analysis when vectorization
     on the unrolled body effectively re-orders stmts.  */
  if (ncopies > 1
      && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
      && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
          > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "cannot perform implicit CSE when unrolling "
                         "with negative dependence distance\n");
      return false;
    }

  elem_type = TREE_TYPE (vectype);
  mode = TYPE_MODE (vectype);

  /* FORNOW.  In some cases can vectorize even if data-type not supported
     (e.g. - data copies).  */
  if (optab_handler (mov_optab, mode) == CODE_FOR_nothing)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "Aligned load, but unsupported type.\n");
      return false;
    }

  /* Check if the load is a part of an interleaving chain.  */
  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
      grouped_load = true;
      /* FORNOW */
      gcc_assert (!nested_in_vect_loop
                  && !STMT_VINFO_GATHER_SCATTER_P (stmt_info));

      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);

      /* If this is single-element interleaving with an element distance
         that leaves unused vector loads around punt - we at least create
         very sub-optimal code in that case (and blow up memory).  */
      if (first_stmt == stmt
          && !GROUP_NEXT_ELEMENT (stmt_info)
          && GROUP_SIZE (stmt_info) > TYPE_VECTOR_SUBPARTS (vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "single-element interleaving not supported "
                             "for not adjacent vector loads\n");
          return false;
        }

      if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        slp_perm = true;

      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
      if (!slp
          && !PURE_SLP_STMT (stmt_info)
          && !STMT_VINFO_STRIDED_P (stmt_info))
        {
          if (vect_load_lanes_supported (vectype, group_size))
            load_lanes_p = true;
          else if (!vect_grouped_load_supported (vectype, group_size))
            return false;
        }

      /* Invalidate assumptions made by dependence analysis when vectorization
         on the unrolled body effectively re-orders stmts.  */
      if (!PURE_SLP_STMT (stmt_info)
          && STMT_VINFO_MIN_NEG_DIST (stmt_info) != 0
          && ((unsigned) LOOP_VINFO_VECT_FACTOR (loop_vinfo)
              > STMT_VINFO_MIN_NEG_DIST (stmt_info)))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "cannot perform implicit CSE when performing "
                             "group loads with negative dependence distance\n");
          return false;
        }

      /* Similarly when the stmt is a load that is both part of a SLP
         instance and a loop vectorized stmt via the same-dr mechanism
         we have to give up.  */
      if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
          && (STMT_SLP_TYPE (stmt_info)
              != STMT_SLP_TYPE (vinfo_for_stmt
                                  (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "conflicting SLP types for CSEd load\n");
          return false;
        }
    }

  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      gimple *def_stmt;
      gather_decl = vect_check_gather_scatter (stmt, loop_vinfo, &gather_base,
                                               &gather_off, &gather_scale);
      gcc_assert (gather_decl);
      if (!vect_is_simple_use (gather_off, vinfo, &def_stmt, &gather_dt,
                               &gather_off_vectype))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "gather index use not simple.\n");
          return false;
        }
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      if ((grouped_load
           && (slp || PURE_SLP_STMT (stmt_info)))
          && (group_size > nunits
              || nunits % group_size != 0))
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "unhandled strided group load\n");
          return false;
        }
    }
  else
    {
      negative = tree_int_cst_compare (nested_in_vect_loop
                                       ? STMT_VINFO_DR_STEP (stmt_info)
                                       : DR_STEP (dr),
                                       size_zero_node) < 0;
      if (negative && ncopies > 1)
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "multiple types with negative step.\n");
          return false;
        }

      if (negative)
        {
          if (grouped_load)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step for group load not supported"
                                 "\n");
              return false;
            }
          alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
          if (alignment_support_scheme != dr_aligned
              && alignment_support_scheme != dr_unaligned_supported)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step but alignment required.\n");
              return false;
            }
          if (!perm_mask_for_reverse (vectype))
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                                 "negative step and reversing not supported."
                                 "\n");
              return false;
            }
        }
    }

  if (!vec_stmt) /* transformation not required.  */
    {
      STMT_VINFO_TYPE (stmt_info) = load_vec_info_type;
      /* The SLP costs are calculated during SLP analysis.  */
      if (!PURE_SLP_STMT (stmt_info))
        vect_model_load_cost (stmt_info, ncopies, load_lanes_p,
                              NULL, NULL, NULL);
      return true;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, vect_location,
                     "transform load. ncopies = %d\n", ncopies);

  /* Transform.  */

  ensure_base_align (stmt_info, dr);
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      tree vec_oprnd0 = NULL_TREE, op;
      tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gather_decl));
      tree rettype, srctype, ptrtype, idxtype, masktype, scaletype;
      tree ptr, mask, var, scale, merge, perm_mask = NULL_TREE,
           prev_res = NULL_TREE;
      edge pe = loop_preheader_edge (loop);
      gimple_seq seq;
      basic_block new_bb;
      enum { NARROW, NONE, WIDEN } modifier;
      int gather_off_nunits = TYPE_VECTOR_SUBPARTS (gather_off_vectype);

      if (nunits == gather_off_nunits)
        modifier = NONE;
      else if (nunits == gather_off_nunits / 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, gather_off_nunits);
          modifier = WIDEN;

          for (i = 0; i < gather_off_nunits; ++i)
            sel[i] = i | nunits;

          perm_mask = vect_gen_perm_mask_checked (gather_off_vectype, sel);
        }
      else if (nunits == gather_off_nunits * 2)
        {
          unsigned char *sel = XALLOCAVEC (unsigned char, nunits);
          modifier = NARROW;

          for (i = 0; i < nunits; ++i)
            sel[i] = i < gather_off_nunits
                     ? i : i + nunits - gather_off_nunits;

          perm_mask = vect_gen_perm_mask_checked (vectype, sel);
        }
      else
        gcc_unreachable ();
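      /* Concrete mask values (illustration only): with nunits == 4 and
         gather_off_nunits == 8 (the WIDEN case), sel is
         {4, 5, 6, 7, 4, 5, 6, 7}, i.e. the odd-numbered copies select the
         high half of the offset vector.  With nunits == 8 and
         gather_off_nunits == 4 (the NARROW case), sel is
         {0, 1, 2, 3, 8, 9, 10, 11}, concatenating the low halves of two
         consecutive gather results.  */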
      rettype = TREE_TYPE (TREE_TYPE (gather_decl));
      srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist);
      scaletype = TREE_VALUE (arglist);
      gcc_checking_assert (types_compatible_p (srctype, rettype));

      vec_dest = vect_create_destination_var (scalar_dest, vectype);

      ptr = fold_convert (ptrtype, gather_base);
      if (!is_gimple_min_invariant (ptr))
        {
          ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
          gcc_assert (!new_bb);
        }

      /* Currently we support only unconditional gather loads,
         so mask should be all ones.  */
      if (TREE_CODE (masktype) == INTEGER_TYPE)
        mask = build_int_cst (masktype, -1);
      else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE)
        {
          mask = build_int_cst (TREE_TYPE (masktype), -1);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (masktype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = -1;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (masktype)));
          mask = build_real (TREE_TYPE (masktype), r);
          mask = build_vector_from_val (masktype, mask);
          mask = vect_init_vector (stmt, mask, masktype, NULL);
        }
      else
        gcc_unreachable ();

      scale = build_int_cst (scaletype, gather_scale);

      if (TREE_CODE (TREE_TYPE (rettype)) == INTEGER_TYPE)
        merge = build_int_cst (TREE_TYPE (rettype), 0);
      else if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (rettype)))
        {
          REAL_VALUE_TYPE r;
          long tmp[6];
          for (j = 0; j < 6; ++j)
            tmp[j] = 0;
          real_from_target (&r, tmp, TYPE_MODE (TREE_TYPE (rettype)));
          merge = build_real (TREE_TYPE (rettype), r);
        }
      else
        gcc_unreachable ();
      merge = build_vector_from_val (rettype, merge);
      merge = vect_init_vector (stmt, merge, rettype, NULL);

      prev_stmt_info = NULL;
      for (j = 0; j < ncopies; ++j)
        {
          if (modifier == WIDEN && (j & 1))
            op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
                                       perm_mask, stmt, gsi);
          else if (j == 0)
            op = vec_oprnd0
              = vect_get_vec_def_for_operand (gather_off, stmt);
          else
            op = vec_oprnd0
              = vect_get_vec_def_for_stmt_copy (gather_dt, vec_oprnd0);

          if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
                          == TYPE_VECTOR_SUBPARTS (idxtype));
              var = vect_get_new_ssa_name (idxtype, vect_simple_var);
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              op = var;
            }

          new_stmt
            = gimple_build_call (gather_decl, 5, merge, ptr, op, mask, scale);

          if (!useless_type_conversion_p (vectype, rettype))
            {
              gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
                          == TYPE_VECTOR_SUBPARTS (rettype));
              op = vect_get_new_ssa_name (rettype, vect_simple_var);
              gimple_call_set_lhs (new_stmt, op);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);
              var = make_ssa_name (vec_dest);
              op = build1 (VIEW_CONVERT_EXPR, vectype, op);
              new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
            }
          else
            {
              var = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, var);
            }

          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          if (modifier == NARROW)
            {
              if ((j & 1) == 0)
                {
                  prev_res = var;
                  continue;
                }
              var = permute_vec_elements (prev_res, var,
                                          perm_mask, stmt, gsi);
              new_stmt = SSA_NAME_DEF_STMT (var);
            }

          if (prev_stmt_info == NULL)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
          else
            STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
          prev_stmt_info = vinfo_for_stmt (new_stmt);
        }
      return true;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gimple_stmt_iterator incr_gsi;
      bool insert_after;
      gimple *incr;
      tree offvar;
      tree ivstep;
      tree running_off;
      vec<constructor_elt, va_gc> *v = NULL;
      gimple_seq stmts = NULL;
      tree stride_base, stride_step, alias_off;

      gcc_assert (!nested_in_vect_loop);

      if (slp && grouped_load)
        first_dr = STMT_VINFO_DATA_REF
            (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
      else
        first_dr = dr;

      stride_base
        = fold_build_pointer_plus
            (DR_BASE_ADDRESS (first_dr),
             size_binop (PLUS_EXPR,
                         convert_to_ptrofftype (DR_OFFSET (first_dr)),
                         convert_to_ptrofftype (DR_INIT (first_dr))));
      stride_step = fold_convert (sizetype, DR_STEP (first_dr));

      /* For a load with loop-invariant (but other than power-of-2)
         stride (i.e. not a grouped access) like so:

           for (i = 0; i < n; i += stride)
             ... = array[i];

         we generate a new induction variable and new accesses to
         form a new vector (or vectors, depending on ncopies):

           for (j = 0; ; j += VF*stride)
             tmp1 = array[j];
             tmp2 = array[j + stride];
             ...
             vectemp = {tmp1, tmp2, ...}
         */
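      /* A concrete instance (illustration only): with stride == 3, VF == 4
         and a 4-element vectype, each vectorized iteration emits scalar
         loads of array[j], array[j + 3], array[j + 6] and array[j + 9],
         collects them in a CONSTRUCTOR to form one vector, and advances
         the induction variable by 4*3 elements.  */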
      ivstep = fold_build2 (MULT_EXPR, TREE_TYPE (stride_step), stride_step,
                            build_int_cst (TREE_TYPE (stride_step), vf));

      standard_iv_increment_position (loop, &incr_gsi, &insert_after);

      create_iv (unshare_expr (stride_base), unshare_expr (ivstep), NULL,
                 loop, &incr_gsi, insert_after,
                 &offvar, NULL);
      incr = gsi_stmt (incr_gsi);
      set_vinfo_for_stmt (incr, new_stmt_vec_info (incr, loop_vinfo));

      stride_step = force_gimple_operand (unshare_expr (stride_step),
                                          &stmts, true, NULL_TREE);
      if (stmts)
        gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

      prev_stmt_info = NULL;
      running_off = offvar;
      alias_off = build_int_cst (reference_alias_ptr_type (DR_REF (first_dr)), 0);
      int nloads = nunits;
      tree ltype = TREE_TYPE (vectype);
      auto_vec<tree> dr_chain;
      if (slp)
        {
          nloads = nunits / group_size;
          if (group_size < nunits)
            ltype = build_vector_type (TREE_TYPE (vectype), group_size);
          else
            ltype = vectype;
          ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
          ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          if (slp_perm)
            dr_chain.create (ncopies);
        }
      for (j = 0; j < ncopies; j++)
        {
          tree vec_inv;

          if (nloads > 1)
            {
              vec_alloc (v, nloads);
              for (i = 0; i < nloads; i++)
                {
                  tree newref, newoff;

                  newref = build2 (MEM_REF, ltype, running_off, alias_off);
                  newref = force_gimple_operand_gsi (gsi, newref, true,
                                                     NULL_TREE, true,
                                                     GSI_SAME_STMT);
                  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, newref);
                  newoff = copy_ssa_name (running_off);
                  incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                              running_off, stride_step);
                  vect_finish_stmt_generation (stmt, incr, gsi);

                  running_off = newoff;
                }

              vec_inv = build_constructor (vectype, v);
              new_temp = vect_init_vector (stmt, vec_inv, vectype, gsi);
              new_stmt = SSA_NAME_DEF_STMT (new_temp);
            }
          else
            {
              new_stmt = gimple_build_assign (make_ssa_name (ltype),
                                              build2 (MEM_REF, ltype,
                                                      running_off, alias_off));
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              tree newoff = copy_ssa_name (running_off);
              gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
              vect_finish_stmt_generation (stmt, incr, gsi);

              running_off = newoff;
            }

          if (slp)
            {
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
              if (slp_perm)
                dr_chain.quick_push (gimple_assign_lhs (new_stmt));
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      if (slp_perm)
        vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                      slp_node_instance, false);
      return true;
    }
  if (grouped_load)
    {
      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
      /* For BB vectorization we directly vectorize a subchain
         without permutation.  */
      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];

      /* Check if the chain of loads is already vectorized.  */
      if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt))
          /* For SLP we would need to copy over SLP_TREE_VEC_STMTS.
             ??? But we can only do so if there is exactly one
             as we have no way to get at the rest.  Leave the CSE
             opportunity alone.
             ??? With the group load eventually participating
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
          && !slp)
        {
          *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
          return true;
        }
      first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));

      /* VEC_NUM is the number of vect stmts to be created for this group.  */
      if (slp)
        {
          grouped_load = false;
          /* For SLP permutation support we need to load the whole group,
             not only the number of vector stmts the permutation result
             fits in.  */
          if (slp_perm)
            vec_num = (group_size * vf + nunits - 1) / nunits;
          else
            vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
          group_gap_adj = vf * group_size - nunits * vec_num;
        }
      else
        vec_num = group_size;
    }
  else
    {
      first_stmt = stmt;
      first_dr = dr;
      group_size = vec_num = 1;
      group_gap_adj = 0;
    }

  alignment_support_scheme = vect_supportable_dr_alignment (first_dr, false);
  gcc_assert (alignment_support_scheme);
  /* Targets with load-lane instructions must not require explicit
     realignment.  */
  gcc_assert (!load_lanes_p
              || alignment_support_scheme == dr_aligned
              || alignment_support_scheme == dr_unaligned_supported);

  /* In case the vectorization factor (VF) is bigger than the number
     of elements that we can fit in a vectype (nunits), we have to generate
     more than one vector stmt - i.e - we need to "unroll" the
     vector stmt by a factor VF/nunits.  In doing so, we record a pointer
     from one copy of the vector stmt to the next, in the field
     STMT_VINFO_RELATED_STMT.  This is necessary in order to allow following
     stages to find the correct vector defs to be used when vectorizing
     stmts that use the defs of the current stmt.  The example below
     illustrates the vectorization process when VF=16 and nunits=4 (i.e., we
     need to create 4 vectorized stmts):

     before vectorization:
                                RELATED_STMT    VEC_STMT
        S1:     x = memref      -               -
        S2:     z = x + 1       -               -

     step 1: vectorize stmt S1:
        We first create the vector stmt VS1_0, and, as usual, record a
        pointer to it in the STMT_VINFO_VEC_STMT of the scalar stmt S1.
        Next, we create the vector stmt VS1_1, and record a pointer to
        it in the STMT_VINFO_RELATED_STMT of the vector stmt VS1_0.
        Similarly, for VS1_2 and VS1_3.  This is the resulting chain of
        stmts and pointers:
                                RELATED_STMT    VEC_STMT
        VS1_0:  vx0 = memref0   VS1_1           -
        VS1_1:  vx1 = memref1   VS1_2           -
        VS1_2:  vx2 = memref2   VS1_3           -
        VS1_3:  vx3 = memref3   -               -
        S1:     x = load        -               VS1_0
        S2:     z = x + 1       -               -

     See in documentation in vect_get_vec_def_for_stmt_copy for how the
     information we recorded in RELATED_STMT field is used to vectorize
     stmt S2.  */

  /* In case of interleaving (non-unit grouped access):

        S1:  x2 = &base + 2
        S2:  x0 = &base
        S3:  x1 = &base + 1
        S4:  x3 = &base + 3

     Vectorized loads are created in the order of memory accesses
     starting from the access of the first stmt of the chain:

        VS1: vx0 = &base
        VS2: vx1 = &base + vec_size*1
        VS3: vx3 = &base + vec_size*2
        VS4: vx4 = &base + vec_size*3

     Then permutation statements are generated:

        VS5: vx5 = VEC_PERM_EXPR < vx0, vx1, { 0, 2, ..., i*2 } >
        VS6: vx6 = VEC_PERM_EXPR < vx0, vx1, { 1, 3, ..., i*2+1 } >
        ...

     And they are put in STMT_VINFO_VEC_STMT of the corresponding scalar stmts
     (the order of the data-refs in the output of vect_permute_load_chain
     corresponds to the order of scalar stmts in the interleaving chain - see
     the documentation of vect_permute_load_chain()).
     The generation of permutation stmts and recording them in
     STMT_VINFO_VEC_STMT is done in vect_transform_grouped_load().

     In case of both multiple types and interleaving, the vector loads and
     permutation stmts above are created for every copy.  The result vector
     stmts are put in STMT_VINFO_VEC_STMT for the first copy and in the
     corresponding STMT_VINFO_RELATED_STMT for the next copies.  */
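  /* As a concrete instance of the masks above: for a chain of two
     interleaved loads and 4-element vectors, the extraction masks are
     { 0, 2, 4, 6 } and { 1, 3, 5, 7 }, so vx5 collects x0, x2, ... and
     vx6 collects x1, x3, ...  */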
  /* If the data reference is aligned (dr_aligned) or potentially unaligned
     on a target that supports unaligned accesses (dr_unaligned_supported)
     we generate the following code:

       p = initial_addr;
       indx = 0;
       loop {
         p = p + indx * vectype_size;
         vec_dest = *(p);
         indx = indx + 1;
       }

     Otherwise, the data reference is potentially unaligned on a target that
     does not support unaligned accesses (dr_explicit_realign_optimized) -
     then generate the following code, in which the data in each iteration is
     obtained by two vector loads, one from the previous iteration, and one
     from the current iteration:

       msq_init = *(floor(p1))
       p2 = initial_addr + VS - 1;
       realignment_token = call target_builtin;
       indx = 0;
       loop {
         p2 = p2 + indx * vectype_size
         lsq = *(floor(p2))
         vec_dest = realign_load (msq, lsq, realignment_token)
         indx = indx + 1;
         msq = lsq;
       }   */
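  /* Sketch of the realignment effect (illustration only): for a 4-element
     vector load from an address one element past a vector boundary, msq
     holds {a0, a1, a2, a3} from the aligned address below and lsq
     {a4, a5, a6, a7} from the next one; realign_load combines the pair,
     shifting by the misalignment encoded in realignment_token, to yield
     the desired {a1, a2, a3, a4}.  */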
  /* If the misalignment remains the same throughout the execution of the
     loop, we can create the init_addr and permutation mask at the loop
     preheader.  Otherwise, it needs to be created inside the loop.
     This can only occur when vectorizing memory accesses in the inner-loop
     nested within an outer-loop that is being vectorized.  */

  if (nested_in_vect_loop
      && (TREE_INT_CST_LOW (DR_STEP (dr))
          % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0))
    {
      gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized);
      compute_in_loop = true;
    }

  if ((alignment_support_scheme == dr_explicit_realign_optimized
       || alignment_support_scheme == dr_explicit_realign)
      && !compute_in_loop)
    {
      msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                    alignment_support_scheme, NULL_TREE,
                                    &at_loop);
      if (alignment_support_scheme == dr_explicit_realign_optimized)
        {
          phi = as_a <gphi *> (SSA_NAME_DEF_STMT (msq));
          byte_offset = size_binop (MINUS_EXPR, TYPE_SIZE_UNIT (vectype),
                                    size_one_node);
        }
    }
  else
    at_loop = loop;

  if (negative)
    offset = size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1);

  if (load_lanes_p)
    aggr_type = build_array_type_nelts (elem_type, vec_num * nunits);
  else
    aggr_type = vectype;

  prev_stmt_info = NULL;
  for (j = 0; j < ncopies; j++)
    {
      /* 1. Create the vector or array pointer update chain.  */
      if (j == 0)
        {
          bool simd_lane_access_p
            = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info);
          if (simd_lane_access_p
              && TREE_CODE (DR_BASE_ADDRESS (first_dr)) == ADDR_EXPR
              && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr), 0))
              && integer_zerop (DR_OFFSET (first_dr))
              && integer_zerop (DR_INIT (first_dr))
              && alias_sets_conflict_p (get_alias_set (aggr_type),
                                        get_alias_set (DR_REF (first_dr)))
              && (alignment_support_scheme == dr_aligned
                  || alignment_support_scheme == dr_unaligned_supported))
            {
              dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr));
              dataref_offset = build_int_cst (reference_alias_ptr_type
                                              (DR_REF (first_dr)), 0);
              inv_p = false;
            }
          else
            dataref_ptr
              = vect_create_data_ref_ptr (first_stmt, aggr_type, at_loop,
                                          offset, &dummy, gsi, &ptr_incr,
                                          simd_lane_access_p, &inv_p,
                                          byte_offset);
        }
      else if (dataref_offset)
        dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset,
                                          TYPE_SIZE_UNIT (aggr_type));
      else
        dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt,
                                       TYPE_SIZE_UNIT (aggr_type));

      if (grouped_load || slp_perm)
        dr_chain.create (vec_num);

      if (load_lanes_p)
        {
          tree vec_array;

          vec_array = create_vector_array (vectype, vec_num);

          /* Emit:
               VEC_ARRAY = LOAD_LANES (MEM_REF[...all elements...]).  */
          data_ref = create_array_ref (aggr_type, dataref_ptr, first_dr);
          new_stmt = gimple_build_call_internal (IFN_LOAD_LANES, 1, data_ref);
          gimple_call_set_lhs (new_stmt, vec_array);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);

          /* Extract each vector into an SSA_NAME.  */
          for (i = 0; i < vec_num; i++)
            {
              new_temp = read_vector_array (stmt, gsi, scalar_dest,
                                            vec_array, i);
              dr_chain.quick_push (new_temp);
            }

          /* Record the mapping between SSA_NAMEs and statements.  */
          vect_record_grouped_load_vectors (stmt, dr_chain);
        }
      else
        {
          for (i = 0; i < vec_num; i++)
            {
              if (i > 0)
                dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                               stmt, NULL_TREE);

              /* 2. Create the vector-load in the loop.  */
              switch (alignment_support_scheme)
                {
                case dr_aligned:
                case dr_unaligned_supported:
                  {
                    unsigned int align, misalign;

                    data_ref
                      = fold_build2 (MEM_REF, vectype, dataref_ptr,
                                     dataref_offset
                                     ? dataref_offset
                                     : build_int_cst (reference_alias_ptr_type
                                                      (DR_REF (first_dr)), 0));
                    align = TYPE_ALIGN_UNIT (vectype);
                    if (alignment_support_scheme == dr_aligned)
                      {
                        gcc_assert (aligned_access_p (first_dr));
                        misalign = 0;
                      }
                    else if (DR_MISALIGNMENT (first_dr) == -1)
                      {
                        if (DR_VECT_AUX (first_dr)->base_element_aligned)
                          align = TYPE_ALIGN_UNIT (elem_type);
                        else
                          align = (get_object_alignment (DR_REF (first_dr))
                                   / BITS_PER_UNIT);
                        misalign = 0;
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                align * BITS_PER_UNIT);
                      }
                    else
                      {
                        TREE_TYPE (data_ref)
                          = build_aligned_type (TREE_TYPE (data_ref),
                                                TYPE_ALIGN (elem_type));
                        misalign = DR_MISALIGNMENT (first_dr);
                      }
                    if (dataref_offset == NULL_TREE
                        && TREE_CODE (dataref_ptr) == SSA_NAME)
                      set_ptr_info_alignment (get_ptr_info (dataref_ptr),
                                              align, misalign);
                    break;
                  }
                case dr_explicit_realign:
                  {
                    tree ptr, bump;

                    tree vs = size_int (TYPE_VECTOR_SUBPARTS (vectype));

                    if (compute_in_loop)
                      msq = vect_setup_realignment (first_stmt, gsi,
                                                    &realignment_token,
                                                    dr_explicit_realign,
                                                    dataref_ptr, NULL);

                    if (TREE_CODE (dataref_ptr) == SSA_NAME)
                      ptr = copy_ssa_name (dataref_ptr);
                    else
                      ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
                    new_stmt = gimple_build_assign
                                 (ptr, BIT_AND_EXPR, dataref_ptr,
                                  build_int_cst
                                  (TREE_TYPE (dataref_ptr),
                                   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    vec_dest = vect_create_destination_var (scalar_dest,
                                                            vectype);
                    new_stmt = gimple_build_assign (vec_dest, data_ref);
                    new_temp = make_ssa_name (vec_dest, new_stmt);
                    gimple_assign_set_lhs (new_stmt, new_temp);
                    gimple_set_vdef (new_stmt, gimple_vdef (stmt));
                    gimple_set_vuse (new_stmt, gimple_vuse (stmt));
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    msq = new_temp;

                    bump = size_binop (MULT_EXPR, vs,
                                       TYPE_SIZE_UNIT (elem_type));
                    bump = size_binop (MINUS_EXPR, bump, size_one_node);
                    ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
                    new_stmt = gimple_build_assign
                                 (NULL_TREE, BIT_AND_EXPR, ptr,
                                  build_int_cst
                                  (TREE_TYPE (ptr),
                                   -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                    ptr = copy_ssa_name (ptr, new_stmt);
                    gimple_assign_set_lhs (new_stmt, ptr);
                    vect_finish_stmt_generation (stmt, new_stmt, gsi);
                    data_ref
                      = build2 (MEM_REF, vectype, ptr,
                                build_int_cst (reference_alias_ptr_type
                                                 (DR_REF (first_dr)), 0));
                    break;
                  }
                case dr_explicit_realign_optimized:
                  if (TREE_CODE (dataref_ptr) == SSA_NAME)
                    new_temp = copy_ssa_name (dataref_ptr);
                  else
                    new_temp = make_ssa_name (TREE_TYPE (dataref_ptr));
                  new_stmt = gimple_build_assign
                               (new_temp, BIT_AND_EXPR, dataref_ptr,
                                build_int_cst
                                (TREE_TYPE (dataref_ptr),
                                 -(HOST_WIDE_INT) TYPE_ALIGN_UNIT (vectype)));
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);
                  data_ref
                    = build2 (MEM_REF, vectype, new_temp,
                              build_int_cst (reference_alias_ptr_type
                                               (DR_REF (first_dr)), 0));
                  break;
                default:
                  gcc_unreachable ();
                }
              vec_dest = vect_create_destination_var (scalar_dest, vectype);
              new_stmt = gimple_build_assign (vec_dest, data_ref);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              vect_finish_stmt_generation (stmt, new_stmt, gsi);

              /* 3. Handle explicit realignment if necessary/supported.
                 Create in loop:
                   vec_dest = realign_load (msq, lsq, realignment_token)  */
              if (alignment_support_scheme == dr_explicit_realign_optimized
                  || alignment_support_scheme == dr_explicit_realign)
                {
                  lsq = gimple_assign_lhs (new_stmt);
                  if (!realignment_token)
                    realignment_token = dataref_ptr;
                  vec_dest = vect_create_destination_var (scalar_dest, vectype);
                  new_stmt = gimple_build_assign (vec_dest, REALIGN_LOAD_EXPR,
                                                  msq, lsq, realignment_token);
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  vect_finish_stmt_generation (stmt, new_stmt, gsi);

                  if (alignment_support_scheme == dr_explicit_realign_optimized)
                    {
                      gcc_assert (phi);
                      if (i == vec_num - 1 && j == ncopies - 1)
                        add_phi_arg (phi, lsq,
                                     loop_latch_edge (containing_loop),
                                     UNKNOWN_LOCATION);
                      msq = lsq;
                    }
                }

              /* 4. Handle invariant-load.  */
              if (inv_p && !bb_vinfo)
                {
                  gcc_assert (!grouped_load);
                  /* If we have versioned for aliasing or the loop doesn't
                     have any data dependencies that would preclude this,
                     then we are sure this is a loop invariant load and
                     thus we can insert it on the preheader edge.  */
                  if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
                      && !nested_in_vect_loop
                      && hoist_defs_of_uses (stmt, loop))
                    {
                      if (dump_enabled_p ())
                        {
                          dump_printf_loc (MSG_NOTE, vect_location,
                                           "hoisting out of the vectorized "
                                           "loop: ");
                          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
                        }
                      tree tem = copy_ssa_name (scalar_dest);
                      gsi_insert_on_edge_immediate
                        (loop_preheader_edge (loop),
                         gimple_build_assign (tem,
                                              unshare_expr
                                                (gimple_assign_rhs1 (stmt))));
                      new_temp = vect_init_vector (stmt, tem, vectype, NULL);
                    }
                  else
                    {
                      gimple_stmt_iterator gsi2 = *gsi;
                      gsi_next (&gsi2);
                      new_temp = vect_init_vector (stmt, scalar_dest,
                                                   vectype, &gsi2);
                    }
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                  set_vinfo_for_stmt (new_stmt,
                                      new_stmt_vec_info (new_stmt, vinfo));
                }

              if (negative)
                {
                  tree perm_mask = perm_mask_for_reverse (vectype);
                  new_temp = permute_vec_elements (new_temp, new_temp,
                                                   perm_mask, stmt, gsi);
                  new_stmt = SSA_NAME_DEF_STMT (new_temp);
                }

              /* Collect vector loads and later create their permutation in
                 vect_transform_grouped_load ().  */
              if (grouped_load || slp_perm)
                dr_chain.quick_push (new_temp);

              /* Store vector loads in the corresponding SLP_NODE.  */
              if (slp && !slp_perm)
                SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
            }
          /* Bump the vector pointer to account for a gap or for excess
             elements loaded for a permuted SLP load.  */
          if (group_gap_adj != 0)
            {
              bool ovf;
              tree bump
                = wide_int_to_tree (sizetype,
                                    wi::smul (TYPE_SIZE_UNIT (elem_type),
                                              group_gap_adj, &ovf));
              dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
                                             stmt, bump);
            }
        }
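      /* Example for the gap adjustment above (illustration only): if
         GROUP_SIZE is 4 but the SLP node uses only 2 elements per group,
         with vf == 2 and nunits == 4 we get vec_num == 1 and
         group_gap_adj == 2*4 - 4*1 == 4, so the pointer is bumped past
         the four unused elements before the next copy.  */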
      if (slp && !slp_perm)
        continue;

      if (slp_perm)
        {
          if (!vect_transform_slp_perm_load (slp_node, dr_chain, gsi, vf,
                                             slp_node_instance, false))
            {
              dr_chain.release ();
              return false;
            }
        }
      else
        {
          if (grouped_load)
            {
              if (!load_lanes_p)
                vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
              *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
            }
          else
            {
              if (j == 0)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
              else
                STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
              prev_stmt_info = vinfo_for_stmt (new_stmt);
            }
        }
      dr_chain.release ();
    }

  return true;
}
/* Function vect_is_simple_cond.

   Input:
   LOOP - the loop that is being vectorized.
   COND - Condition that is checked for simple use.

   Output:
   *COMP_VECTYPE - the vector type for the comparison.

   Returns whether a COND can be vectorized.  Checks whether
   condition operands are supportable using vect_is_simple_use.  */

static bool
vect_is_simple_cond (tree cond, vec_info *vinfo, tree *comp_vectype)
{
  tree lhs, rhs;
  enum vect_def_type dt;
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;

  /* Mask case.  */
  if (TREE_CODE (cond) == SSA_NAME
      && TREE_CODE (TREE_TYPE (cond)) == BOOLEAN_TYPE)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (cond);
      if (!vect_is_simple_use (cond, vinfo, &lhs_def_stmt,
                               &dt, comp_vectype)
          || !*comp_vectype
          || !VECTOR_BOOLEAN_TYPE_P (*comp_vectype))
        return false;
      return true;
    }

  if (!COMPARISON_CLASS_P (cond))
    return false;

  lhs = TREE_OPERAND (cond, 0);
  rhs = TREE_OPERAND (cond, 1);

  if (TREE_CODE (lhs) == SSA_NAME)
    {
      gimple *lhs_def_stmt = SSA_NAME_DEF_STMT (lhs);
      if (!vect_is_simple_use (lhs, vinfo, &lhs_def_stmt, &dt, &vectype1))
        return false;
    }
  else if (TREE_CODE (lhs) != INTEGER_CST && TREE_CODE (lhs) != REAL_CST
           && TREE_CODE (lhs) != FIXED_CST)
    return false;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      gimple *rhs_def_stmt = SSA_NAME_DEF_STMT (rhs);
      if (!vect_is_simple_use (rhs, vinfo, &rhs_def_stmt, &dt, &vectype2))
        return false;
    }
  else if (TREE_CODE (rhs) != INTEGER_CST && TREE_CODE (rhs) != REAL_CST
           && TREE_CODE (rhs) != FIXED_CST)
    return false;

  *comp_vectype = vectype1 ? vectype1 : vectype2;
  return true;
}
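/* For example, a COND such as "x_1 < y_2" is simple if each of x_1 and y_2
   is an internal definition, a loop invariant or a constant; a COND that is
   itself a boolean SSA name (a mask) is simple if its definition is, in
   which case *COMP_VECTYPE is the boolean vector type of that mask.  */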
/* vectorizable_condition.

   Check if STMT is conditional modify expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it
   at GSI.

   When STMT is vectorized as nested cycle, REDUC_DEF is the vector variable
   to be used at REDUC_INDEX (in then clause if REDUC_INDEX is 1, and in
   else clause if it is 2).

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

bool
vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
                        gimple **vec_stmt, tree reduc_def, int reduc_index,
                        slp_tree slp_node)
{
  tree scalar_dest = NULL_TREE;
  tree vec_dest = NULL_TREE;
  tree cond_expr, then_clause, else_clause;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree comp_vectype = NULL_TREE;
  tree vec_cond_lhs = NULL_TREE, vec_cond_rhs = NULL_TREE;
  tree vec_then_clause = NULL_TREE, vec_else_clause = NULL_TREE;
  tree vec_compare, vec_cond_expr;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dt, dts[4];
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  vec<tree> vec_oprnds2 = vNULL;
  vec<tree> vec_oprnds3 = vNULL;
  tree vec_cmp_type;
  bool masked = false;

  if (reduc_index && STMT_SLP_TYPE (stmt_info))
    return false;

  if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == TREE_CODE_REDUCTION)
    {
      if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
        return false;

      if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
          && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
               && reduc_def))
        return false;

      /* FORNOW: not yet supported.  */
      if (STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "value used after loop.\n");
          return false;
        }
    }

  /* Is vectorizable conditional operation?  */
  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (code != COND_EXPR)
    return false;

  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  int nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (reduc_index && ncopies > 1)
    return false; /* FORNOW */

  cond_expr = gimple_assign_rhs1 (stmt);
  then_clause = gimple_assign_rhs2 (stmt);
  else_clause = gimple_assign_rhs3 (stmt);

  if (!vect_is_simple_cond (cond_expr, stmt_info->vinfo, &comp_vectype)
      || !comp_vectype)
    return false;

  gimple *def_stmt;
  if (!vect_is_simple_use (then_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;
  if (!vect_is_simple_use (else_clause, stmt_info->vinfo, &def_stmt, &dt))
    return false;

  if (VECTOR_BOOLEAN_TYPE_P (comp_vectype))
    {
      vec_cmp_type = comp_vectype;
      masked = true;
    }
  else
    vec_cmp_type = build_same_sized_truth_vector_type (comp_vectype);
  if (vec_cmp_type == NULL_TREE)
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = condition_vec_info_type;
      return expand_vec_cond_expr_p (vectype, comp_vectype);
    }

  /* Transform.  */

  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
      vec_oprnds2.create (1);
      vec_oprnds3.create (1);
    }

  /* Handle def.  */
  scalar_dest = gimple_assign_lhs (stmt);
  vec_dest = vect_create_destination_var (scalar_dest, vectype);

  /* Handle cond expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 4> ops;
              auto_vec<vec<tree>, 4> vec_defs;

              if (masked)
                ops.safe_push (cond_expr);
              else
                {
                  ops.safe_push (TREE_OPERAND (cond_expr, 0));
                  ops.safe_push (TREE_OPERAND (cond_expr, 1));
                }
              ops.safe_push (then_clause);
              ops.safe_push (else_clause);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds3 = vec_defs.pop ();
              vec_oprnds2 = vec_defs.pop ();
              if (!masked)
                vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();

              vec_defs.release ();
            }
          else
            {
              gimple *gtemp;
              if (masked)
                {
                  vec_cond_lhs
                    = vect_get_vec_def_for_operand (cond_expr, stmt,
                                                    comp_vectype);
                  vect_is_simple_use (cond_expr, stmt_info->vinfo,
                                      &gtemp, &dts[0]);
                }
              else
                {
                  vec_cond_lhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 0),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 0),
                                      loop_vinfo, &gtemp, &dts[0]);

                  vec_cond_rhs =
                    vect_get_vec_def_for_operand (TREE_OPERAND (cond_expr, 1),
                                                  stmt, comp_vectype);
                  vect_is_simple_use (TREE_OPERAND (cond_expr, 1),
                                      loop_vinfo, &gtemp, &dts[1]);
                }
              if (reduc_index == 1)
                vec_then_clause = reduc_def;
              else
                {
                  vec_then_clause = vect_get_vec_def_for_operand (then_clause,
                                                                  stmt);
                  vect_is_simple_use (then_clause, loop_vinfo,
                                      &gtemp, &dts[2]);
                }
              if (reduc_index == 2)
                vec_else_clause = reduc_def;
              else
                {
                  vec_else_clause = vect_get_vec_def_for_operand (else_clause,
                                                                  stmt);
                  vect_is_simple_use (else_clause, loop_vinfo, &gtemp, &dts[3]);
                }
            }
        }
      else
        {
          vec_cond_lhs
            = vect_get_vec_def_for_stmt_copy (dts[0],
                                              vec_oprnds0.pop ());
          if (!masked)
            vec_cond_rhs
              = vect_get_vec_def_for_stmt_copy (dts[1],
                                                vec_oprnds1.pop ());

          vec_then_clause = vect_get_vec_def_for_stmt_copy (dts[2],
                                                            vec_oprnds2.pop ());
          vec_else_clause = vect_get_vec_def_for_stmt_copy (dts[3],
                                                            vec_oprnds3.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_cond_lhs);
          if (!masked)
            vec_oprnds1.quick_push (vec_cond_rhs);
          vec_oprnds2.quick_push (vec_then_clause);
          vec_oprnds3.quick_push (vec_else_clause);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_cond_lhs)
        {
          vec_then_clause = vec_oprnds2[i];
          vec_else_clause = vec_oprnds3[i];

          if (masked)
            vec_compare = vec_cond_lhs;
          else
            {
              vec_cond_rhs = vec_oprnds1[i];
              vec_compare = build2 (TREE_CODE (cond_expr), vec_cmp_type,
                                    vec_cond_lhs, vec_cond_rhs);
            }
          vec_cond_expr = build3 (VEC_COND_EXPR, vectype,
                                  vec_compare, vec_then_clause, vec_else_clause);

          new_stmt = gimple_build_assign (vec_dest, vec_cond_expr);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();
  vec_oprnds2.release ();
  vec_oprnds3.release ();

  return true;
}
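/* For example (illustration only), with 4-element vectors the scalar stmt

     x_5 = a_1 < b_2 ? c_3 : d_4;

   is replaced by

     vx_5 = VEC_COND_EXPR <va_1 < vb_2, vc_3, vd_4>;

   which selects each element from vc_3 or vd_4 according to the
   element-wise comparison of va_1 and vb_2.  */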
/* vectorizable_comparison.

   Check if STMT is comparison expression that can be vectorized.
   If VEC_STMT is also passed, vectorize the STMT: create a vectorized
   comparison, put it in VEC_STMT, and insert it at GSI.

   Return FALSE if not a vectorizable STMT, TRUE otherwise.  */

static bool
vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
                         gimple **vec_stmt, tree reduc_def,
                         slp_tree slp_node)
{
  tree lhs, rhs1, rhs2;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  tree vectype1 = NULL_TREE, vectype2 = NULL_TREE;
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  tree vec_rhs1 = NULL_TREE, vec_rhs2 = NULL_TREE;
  tree new_temp;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
  unsigned nunits;
  int ncopies;
  enum tree_code code;
  stmt_vec_info prev_stmt_info = NULL;
  int i, j;
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  vec<tree> vec_oprnds0 = vNULL;
  vec<tree> vec_oprnds1 = vNULL;
  gimple *def_stmt;
  tree mask_type;
  tree mask;

  if (!VECTOR_BOOLEAN_TYPE_P (vectype))
    return false;

  mask_type = vectype;
  nunits = TYPE_VECTOR_SUBPARTS (vectype);

  if (slp_node || PURE_SLP_STMT (stmt_info))
    ncopies = 1;
  else
    ncopies = LOOP_VINFO_VECT_FACTOR (loop_vinfo) / nunits;

  gcc_assert (ncopies >= 1);
  if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
    return false;

  if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
      && !(STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle
           && reduc_def))
    return false;

  if (STMT_VINFO_LIVE_P (stmt_info))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "value used after loop.\n");
      return false;
    }

  if (!is_gimple_assign (stmt))
    return false;

  code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return false;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (!vect_is_simple_use (rhs1, stmt_info->vinfo, &def_stmt,
                           &dts[0], &vectype1))
    return false;

  if (!vect_is_simple_use (rhs2, stmt_info->vinfo, &def_stmt,
                           &dts[1], &vectype2))
    return false;

  if (vectype1 && vectype2
      && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
    return false;

  vectype = vectype1 ? vectype1 : vectype2;

  /* Invariant comparison.  */
  if (!vectype)
    {
      vectype = build_vector_type (TREE_TYPE (rhs1), nunits);
      if (tree_to_shwi (TYPE_SIZE_UNIT (vectype)) != current_vector_size)
        return false;
    }
  else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
    return false;

  if (!vec_stmt)
    {
      STMT_VINFO_TYPE (stmt_info) = comparison_vec_info_type;
      vect_model_simple_cost (stmt_info, ncopies, dts, NULL, NULL);
      return expand_vec_cmp_expr_p (vectype, mask_type);
    }

  /* Transform.  */
  if (!slp_node)
    {
      vec_oprnds0.create (1);
      vec_oprnds1.create (1);
    }

  /* Handle def.  */
  lhs = gimple_assign_lhs (stmt);
  mask = vect_create_destination_var (lhs, mask_type);

  /* Handle cmp expr.  */
  for (j = 0; j < ncopies; j++)
    {
      gassign *new_stmt = NULL;
      if (j == 0)
        {
          if (slp_node)
            {
              auto_vec<tree, 2> ops;
              auto_vec<vec<tree>, 2> vec_defs;

              ops.safe_push (rhs1);
              ops.safe_push (rhs2);
              vect_get_slp_defs (ops, slp_node, &vec_defs, -1);
              vec_oprnds1 = vec_defs.pop ();
              vec_oprnds0 = vec_defs.pop ();
            }
          else
            {
              vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
              vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
            }
        }
      else
        {
          vec_rhs1 = vect_get_vec_def_for_stmt_copy (dts[0],
                                                     vec_oprnds0.pop ());
          vec_rhs2 = vect_get_vec_def_for_stmt_copy (dts[1],
                                                     vec_oprnds1.pop ());
        }

      if (!slp_node)
        {
          vec_oprnds0.quick_push (vec_rhs1);
          vec_oprnds1.quick_push (vec_rhs2);
        }

      /* Arguments are ready.  Create the new vector stmt.  */
      FOR_EACH_VEC_ELT (vec_oprnds0, i, vec_rhs1)
        {
          vec_rhs2 = vec_oprnds1[i];

          new_temp = make_ssa_name (mask);
          new_stmt = gimple_build_assign (new_temp, code, vec_rhs1, vec_rhs2);
          vect_finish_stmt_generation (stmt, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
        }

      if (slp_node)
        continue;

      if (j == 0)
        STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
      else
        STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;

      prev_stmt_info = vinfo_for_stmt (new_stmt);
    }

  vec_oprnds0.release ();
  vec_oprnds1.release ();

  return true;
}
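/* For example (illustration only), the scalar comparison

     b_3 = x_1 < y_2;

   becomes a single vector stmt vb_3 = vx_1 < vy_2 whose result is a
   boolean-vector mask with one element per lane, suitable as the first
   operand of a VEC_COND_EXPR.  */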
/* Make sure the statement is vectorizable.  */

bool
vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
  enum vect_relevant relevance = STMT_VINFO_RELEVANT (stmt_info);
  bool ok;
  tree scalar_type, vectype;
  gimple *pattern_stmt;
  gimple_seq pattern_def_seq;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
    }

  if (gimple_has_volatile_ops (stmt))
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                         "not vectorized: stmt has volatile operands\n");

      return false;
    }

  /* Skip stmts that do not need to be vectorized.  In loops this is expected
     to include:
     - the COND_EXPR which is the loop exit condition
     - any LABEL_EXPRs in the loop
     - computations that are used only for array indexing or loop control.
     In basic blocks we only analyze statements that are a part of some SLP
     instance, therefore, all the statements are relevant.

     Pattern statement needs to be analyzed instead of the original statement
     if the original statement is not relevant.  Otherwise, we analyze both
     statements.  In basic blocks we are called from some SLP instance
     traversal, don't analyze pattern stmts instead, the pattern stmts
     already will be part of SLP instance.  */

  pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
    {
      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
          && pattern_stmt
          && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
        {
          /* Analyze PATTERN_STMT instead of the original stmt.  */
          stmt = pattern_stmt;
          stmt_info = vinfo_for_stmt (pattern_stmt);
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_NOTE, vect_location,
                               "==> examining pattern statement: ");
              dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
            }
        }
      else
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");

          return true;
        }
    }
  else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
           && node == NULL
           && pattern_stmt
           && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
               || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
    {
      /* Analyze PATTERN_STMT too.  */
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "==> examining pattern statement: ");
          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        }

      if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
        return false;
    }

  if (is_pattern_stmt_p (stmt_info)
      && node == NULL
      && (pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)))
    {
      gimple_stmt_iterator si;

      for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
        {
          gimple *pattern_def_stmt = gsi_stmt (si);
          if (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_def_stmt))
              || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_def_stmt)))
            {
              /* Analyze def stmt of STMT if it's a pattern stmt.  */
              if (dump_enabled_p ())
                {
                  dump_printf_loc (MSG_NOTE, vect_location,
                                   "==> examining pattern def statement: ");
                  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
                }

              if (!vect_analyze_stmt (pattern_def_stmt,
                                      need_to_vectorize, node))
                return false;
            }
        }
    }

  switch (STMT_VINFO_DEF_TYPE (stmt_info))
    {
      case vect_internal_def:
        break;

      case vect_reduction_def:
      case vect_nested_cycle:
        gcc_assert (!bb_vinfo
                    && (relevance == vect_used_in_outer
                        || relevance == vect_used_in_outer_by_reduction
                        || relevance == vect_used_by_reduction
                        || relevance == vect_unused_in_scope));
        break;

      case vect_induction_def:
      case vect_constant_def:
      case vect_external_def:
      case vect_unknown_def_type:
      default:
        gcc_unreachable ();
    }

  if (bb_vinfo)
    {
      gcc_assert (PURE_SLP_STMT (stmt_info));

      scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location,
                           "get vectype for scalar type: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
          dump_printf (MSG_NOTE, "\n");
        }

      vectype = get_vectype_for_scalar_type (scalar_type);
      if (!vectype)
        {
          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                               "not SLPed: unsupported data-type ");
              dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
                                 scalar_type);
              dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
            }
          return false;
        }

      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
          dump_printf (MSG_NOTE, "\n");
        }

      STMT_VINFO_VECTYPE (stmt_info) = vectype;
    }

  if (STMT_VINFO_RELEVANT_P (stmt_info))
    {
      gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
      gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
                  || (is_gimple_call (stmt)
                      && gimple_call_lhs (stmt) == NULL_TREE));
      *need_to_vectorize = true;
    }

  if (PURE_SLP_STMT (stmt_info) && !node)
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "handled only by SLP analysis\n");
      return true;
    }

  ok = true;
  if (!bb_vinfo
      && (STMT_VINFO_RELEVANT_P (stmt_info)
          || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
    ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
          || vectorizable_conversion (stmt, NULL, NULL, node)
          || vectorizable_shift (stmt, NULL, NULL, node)
          || vectorizable_operation (stmt, NULL, NULL, node)
          || vectorizable_assignment (stmt, NULL, NULL, node)
          || vectorizable_load (stmt, NULL, NULL, node, NULL)
          || vectorizable_call (stmt, NULL, NULL, node)
          || vectorizable_store (stmt, NULL, NULL, node)
          || vectorizable_reduction (stmt, NULL, NULL, node)
          || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
          || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
  else
    {
      if (bb_vinfo)
        ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node)
              || vectorizable_conversion (stmt, NULL, NULL, node)
              || vectorizable_shift (stmt, NULL, NULL, node)
              || vectorizable_operation (stmt, NULL, NULL, node)
              || vectorizable_assignment (stmt, NULL, NULL, node)
              || vectorizable_load (stmt, NULL, NULL, node, NULL)
              || vectorizable_call (stmt, NULL, NULL, node)
              || vectorizable_store (stmt, NULL, NULL, node)
              || vectorizable_condition (stmt, NULL, NULL, NULL, 0, node)
              || vectorizable_comparison (stmt, NULL, NULL, NULL, node));
    }

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: relevant stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  if (bb_vinfo)
    return true;

  /* Stmts that are (also) "live" (i.e. - that are used out of the loop)
     need extra handling, except for vectorizable reductions.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    ok = vectorizable_live_operation (stmt, NULL, NULL);

  if (!ok)
    {
      if (dump_enabled_p ())
        {
          dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                           "not vectorized: live stmt not ");
          dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
          dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
        }

      return false;
    }

  return true;
}
/* Function vect_transform_stmt.

   Create a vectorized stmt to replace STMT, and insert it at BSI.  */

bool
vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
                     bool *grouped_store, slp_tree slp_node,
                     slp_instance slp_node_instance)
{
  bool is_store = false;
  gimple *vec_stmt = NULL;
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  bool done;

  gimple *old_vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);

  switch (STMT_VINFO_TYPE (stmt_info))
    {
    case type_demotion_vec_info_type:
    case type_promotion_vec_info_type:
    case type_conversion_vec_info_type:
      done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case induc_vec_info_type:
      gcc_assert (!slp_node);
      done = vectorizable_induction (stmt, gsi, &vec_stmt);
      gcc_assert (done);
      break;

    case shift_vec_info_type:
      done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case op_vec_info_type:
      done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case assignment_vec_info_type:
      done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    case load_vec_info_type:
      done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
                                slp_node_instance);
      gcc_assert (done);
      break;

    case store_vec_info_type:
      done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
        {
          /* In case of interleaving, the whole chain is vectorized when the
             last store in the chain is reached.  Store stmts before the last
             one are skipped, and their vec_stmt_info shouldn't be freed
             meanwhile.  */
          *grouped_store = true;
          if (STMT_VINFO_VEC_STMT (stmt_info))
            is_store = true;
        }
      else
        is_store = true;
      break;

    case condition_vec_info_type:
      done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node);
      gcc_assert (done);
      break;

    case comparison_vec_info_type:
      done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node);
      gcc_assert (done);
      break;

    case call_vec_info_type:
      done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      if (is_gimple_call (stmt)
          && gimple_call_internal_p (stmt)
          && gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
        is_store = true;
      break;

    case call_simd_clone_vec_info_type:
      done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node);
      stmt = gsi_stmt (*gsi);
      break;

    case reduc_vec_info_type:
      done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node);
      gcc_assert (done);
      break;

    default:
      if (!STMT_VINFO_LIVE_P (stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
                             "stmt not supported.\n");
          gcc_unreachable ();
        }
    }

  /* Verify SLP vectorization doesn't mess with STMT_VINFO_VEC_STMT.
     This would break hybrid SLP vectorization.  */
  if (slp_node)
    gcc_assert (!vec_stmt
                && STMT_VINFO_VEC_STMT (stmt_info) == old_vec_stmt);

  /* Handle inner-loop stmts whose DEF is used in the loop-nest that
     is being vectorized, but outside the immediately enclosing loop.  */
  if (vec_stmt
      && STMT_VINFO_LOOP_VINFO (stmt_info)
      && nested_in_vect_loop_p (LOOP_VINFO_LOOP (
                                  STMT_VINFO_LOOP_VINFO (stmt_info)), stmt)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
      && (STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_outer
          || STMT_VINFO_RELEVANT (stmt_info) ==
                                           vect_used_in_outer_by_reduction))
    {
      struct loop *innerloop = LOOP_VINFO_LOOP (
                                 STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree scalar_dest;
      gimple *exit_phi;

      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "Record the vdef for outer-loop vectorization.\n");

      /* Find the relevant loop-exit phi-node, and record the vec_stmt there
         (to be used when vectorizing outer-loop stmts that use the DEF of
         STMT).  */
      if (gimple_code (stmt) == GIMPLE_PHI)
        scalar_dest = PHI_RESULT (stmt);
      else
        scalar_dest = gimple_assign_lhs (stmt);

      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
        {
          if (!flow_bb_inside_loop_p (innerloop, gimple_bb (USE_STMT (use_p))))
            {
              exit_phi = USE_STMT (use_p);
              STMT_VINFO_VEC_STMT (vinfo_for_stmt (exit_phi)) = vec_stmt;
            }
        }
    }

  /* Handle stmts whose DEF is used outside the loop-nest that is
     being vectorized.  */
  if (STMT_VINFO_LIVE_P (stmt_info)
      && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
    {
      done = vectorizable_live_operation (stmt, gsi, &vec_stmt);
      gcc_assert (done);
    }

  if (vec_stmt)
    STMT_VINFO_VEC_STMT (stmt_info) = vec_stmt;

  return is_store;
}
/* Remove a group of stores (for SLP or interleaving), free their
   stmt_vec_info.  */

void
vect_remove_stores (gimple *first_stmt)
{
  gimple *next = first_stmt;
  gimple *tmp;
  gimple_stmt_iterator next_si;

  while (next)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (next);

      tmp = GROUP_NEXT_ELEMENT (stmt_info);
      if (is_pattern_stmt_p (stmt_info))
        next = STMT_VINFO_RELATED_STMT (stmt_info);
      /* Free the attached stmt_vec_info and remove the stmt.  */
      next_si = gsi_for_stmt (next);
      unlink_stmt_vdef (next);
      gsi_remove (&next_si, true);
      release_defs (next);
      free_stmt_vec_info (next);
      next = tmp;
    }
}
/* Function new_stmt_vec_info.

   Create and initialize a new stmt_vec_info struct for STMT.  */

stmt_vec_info
new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
{
  stmt_vec_info res;
  res = (stmt_vec_info) xcalloc (1, sizeof (struct _stmt_vec_info));

  STMT_VINFO_TYPE (res) = undef_vec_info_type;
  STMT_VINFO_STMT (res) = stmt;
  res->vinfo = vinfo;
  STMT_VINFO_RELEVANT (res) = vect_unused_in_scope;
  STMT_VINFO_LIVE_P (res) = false;
  STMT_VINFO_VECTYPE (res) = NULL;
  STMT_VINFO_VEC_STMT (res) = NULL;
  STMT_VINFO_VECTORIZABLE (res) = true;
  STMT_VINFO_IN_PATTERN_P (res) = false;
  STMT_VINFO_RELATED_STMT (res) = NULL;
  STMT_VINFO_PATTERN_DEF_SEQ (res) = NULL;
  STMT_VINFO_DATA_REF (res) = NULL;
  STMT_VINFO_VEC_REDUCTION_TYPE (res) = TREE_CODE_REDUCTION;

  STMT_VINFO_DR_BASE_ADDRESS (res) = NULL;
  STMT_VINFO_DR_OFFSET (res) = NULL;
  STMT_VINFO_DR_INIT (res) = NULL;
  STMT_VINFO_DR_STEP (res) = NULL;
  STMT_VINFO_DR_ALIGNED_TO (res) = NULL;

  if (gimple_code (stmt) == GIMPLE_PHI
      && is_loop_header_bb_p (gimple_bb (stmt)))
    STMT_VINFO_DEF_TYPE (res) = vect_unknown_def_type;
  else
    STMT_VINFO_DEF_TYPE (res) = vect_internal_def;

  STMT_VINFO_SAME_ALIGN_REFS (res).create (0);
  STMT_SLP_TYPE (res) = loop_vect;
  GROUP_FIRST_ELEMENT (res) = NULL;
  GROUP_NEXT_ELEMENT (res) = NULL;
  GROUP_SIZE (res) = 0;
  GROUP_STORE_COUNT (res) = 0;
  GROUP_GAP (res) = 0;
  GROUP_SAME_DR_STMT (res) = NULL;

  return res;
}
/* Create a hash table for stmt_vec_info.  */

void
init_stmt_vec_info_vec (void)
{
  gcc_assert (!stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.create (50);
}


/* Free hash table for stmt_vec_info.  */

void
free_stmt_vec_info_vec (void)
{
  unsigned int i;
  stmt_vec_info info;
  FOR_EACH_VEC_ELT (stmt_vec_info_vec, i, info)
    if (info != NULL)
      free_stmt_vec_info (STMT_VINFO_STMT (info));
  gcc_assert (stmt_vec_info_vec.exists ());
  stmt_vec_info_vec.release ();
}
/* Free stmt vectorization related info.  */

void
free_stmt_vec_info (gimple *stmt)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

  if (!stmt_info)
    return;

  /* Check if this statement has a related "pattern stmt"
     (introduced by the vectorizer during the pattern recognition
     pass).  Free pattern's stmt_vec_info and def stmt's stmt_vec_info
     too.  */
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    {
      stmt_vec_info patt_info
        = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
      if (patt_info)
        {
          gimple_seq seq = STMT_VINFO_PATTERN_DEF_SEQ (patt_info);
          gimple *patt_stmt = STMT_VINFO_STMT (patt_info);
          gimple_set_bb (patt_stmt, NULL);
          tree lhs = gimple_get_lhs (patt_stmt);
          if (lhs && TREE_CODE (lhs) == SSA_NAME)
            release_ssa_name (lhs);
          if (seq)
            {
              gimple_stmt_iterator si;
              for (si = gsi_start (seq); !gsi_end_p (si); gsi_next (&si))
                {
                  gimple *seq_stmt = gsi_stmt (si);
                  gimple_set_bb (seq_stmt, NULL);
                  lhs = gimple_get_lhs (seq_stmt);
                  if (lhs && TREE_CODE (lhs) == SSA_NAME)
                    release_ssa_name (lhs);
                  free_stmt_vec_info (seq_stmt);
                }
            }
          free_stmt_vec_info (patt_stmt);
        }
    }

  STMT_VINFO_SAME_ALIGN_REFS (stmt_info).release ();
  STMT_VINFO_SIMD_CLONE_INFO (stmt_info).release ();
  set_vinfo_for_stmt (stmt, NULL);
  free (stmt_info);
}
/* Function get_vectype_for_scalar_type_and_size.

   Returns the vector type corresponding to SCALAR_TYPE and SIZE as supported
   by the target.  */

static tree
get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
{
  machine_mode inner_mode = TYPE_MODE (scalar_type);
  machine_mode simd_mode;
  unsigned int nbytes = GET_MODE_SIZE (inner_mode);
  int nunits;
  tree vectype;

  if (nbytes == 0)
    return NULL_TREE;

  if (GET_MODE_CLASS (inner_mode) != MODE_INT
      && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
    return NULL_TREE;

  /* For vector types of elements whose mode precision doesn't
     match their type's precision, we use an element type of mode
     precision.  The vectorization routines will have to make sure
     they support the proper result truncation/extension.
     We also make sure to build vector types with INTEGER_TYPE
     component type only.  */
  if (INTEGRAL_TYPE_P (scalar_type)
      && (GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)
	  || TREE_CODE (scalar_type) != INTEGER_TYPE))
    scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
						  TYPE_UNSIGNED (scalar_type));

  /* We shouldn't end up building VECTOR_TYPEs of non-scalar components.
     When the component mode passes the above test simply use a type
     corresponding to that mode.  The theory is that any use that
     would cause problems with this will disable vectorization anyway.  */
  else if (!SCALAR_FLOAT_TYPE_P (scalar_type)
	   && !INTEGRAL_TYPE_P (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode, 1);

  /* We can't build a vector type of elements with alignment bigger than
     their size.  */
  else if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
    scalar_type = lang_hooks.types.type_for_mode (inner_mode,
						  TYPE_UNSIGNED (scalar_type));

  /* If we fell back to using the mode, fail if there was
     no scalar type for it.  */
  if (scalar_type == NULL_TREE)
    return NULL_TREE;

  /* If no size was supplied use the mode the target prefers.  Otherwise
     look up a vector mode of the specified size.  */
  if (size == 0)
    simd_mode = targetm.vectorize.preferred_simd_mode (inner_mode);
  else
    simd_mode = mode_for_vector (inner_mode, size / nbytes);
  nunits = GET_MODE_SIZE (simd_mode) / nbytes;
  if (nunits <= 1)
    return NULL_TREE;

  vectype = build_vector_type (scalar_type, nunits);

  if (!VECTOR_MODE_P (TYPE_MODE (vectype))
      && !INTEGRAL_MODE_P (TYPE_MODE (vectype)))
    return NULL_TREE;

  return vectype;
}
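
/* Editorial worked example, assuming a hypothetical target whose
   preferred vector size is 16 bytes: for SCALAR_TYPE == int (a 4-byte
   MODE_INT mode) and SIZE == 16,

     nunits = GET_MODE_SIZE (simd_mode) / nbytes = 16 / 4 = 4

   so the result is a 4-element integer vector type (V4SImode on many
   targets).  A scalar type like _Bool, whose mode precision does not
   match its TYPE_PRECISION, is first replaced by a nonstandard
   integer type of the mode's bitsize, as the code above requires.  */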
unsigned int current_vector_size;

/* Function get_vectype_for_scalar_type.

   Returns the vector type corresponding to SCALAR_TYPE as supported
   by the target.  */

tree
get_vectype_for_scalar_type (tree scalar_type)
{
  tree vectype;
  vectype = get_vectype_for_scalar_type_and_size (scalar_type,
						  current_vector_size);
  if (vectype
      && current_vector_size == 0)
    current_vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
  return vectype;
}
/* Function get_mask_type_for_scalar_type.

   Returns the mask type corresponding to a result of comparison
   of vectors of specified SCALAR_TYPE as supported by target.  */

tree
get_mask_type_for_scalar_type (tree scalar_type)
{
  tree vectype = get_vectype_for_scalar_type (scalar_type);

  if (!vectype)
    return NULL;

  return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype),
				  current_vector_size);
}
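
/* Editorial example: for scalar code such as

     for (i = 0; i < n; i++)
       x[i] = a[i] < b[i] ? c[i] : d[i];

   the vectorized comparison a[i] < b[i] produces a mask rather than
   data, so its type is a truth vector built from the operand
   vectype's subparts count and the current target vector size, not a
   plain data vector type.  */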
/* Function get_same_sized_vectype

   Returns a vector type corresponding to SCALAR_TYPE of size
   VECTOR_TYPE if supported by the target.  */

tree
get_same_sized_vectype (tree scalar_type, tree vector_type)
{
  if (TREE_CODE (scalar_type) == BOOLEAN_TYPE)
    return build_same_sized_truth_vector_type (vector_type);

  return get_vectype_for_scalar_type_and_size
	   (scalar_type, GET_MODE_SIZE (TYPE_MODE (vector_type)));
}
/* Function vect_is_simple_use.

   Input:
   VINFO - the vect info of the loop or basic block that is being vectorized.
   OPERAND - operand in the loop or bb.

   Output:
   DEF_STMT - the defining stmt in case OPERAND is an SSA_NAME.
   DT - the type of definition.

   Returns whether a stmt with OPERAND can be vectorized.
   For loops, supportable operands are constants, loop invariants, and operands
   that are defined by the current iteration of the loop.  Unsupportable
   operands are those that are defined by a previous iteration of the loop (as
   is the case in reduction/induction computations).
   For basic blocks, supportable operands are constants and bb invariants.
   For now, operands defined outside the basic block are not supported.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt)
{
  *def_stmt = NULL;
  *dt = vect_unknown_def_type;

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location,
		       "vect_is_simple_use: operand ");
      dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
      dump_printf (MSG_NOTE, "\n");
    }

  if (CONSTANT_CLASS_P (operand))
    {
      *dt = vect_constant_def;
      return true;
    }

  if (is_gimple_min_invariant (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  if (TREE_CODE (operand) != SSA_NAME)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "not ssa-name.\n");
      return false;
    }

  if (SSA_NAME_IS_DEFAULT_DEF (operand))
    {
      *dt = vect_external_def;
      return true;
    }

  *def_stmt = SSA_NAME_DEF_STMT (operand);
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
    }

  if (! vect_stmt_in_region_p (vinfo, *def_stmt))
    *dt = vect_external_def;
  else
    {
      stmt_vec_info stmt_vinfo = vinfo_for_stmt (*def_stmt);
      if (is_a <bb_vec_info> (vinfo) && !STMT_VINFO_VECTORIZABLE (stmt_vinfo))
	*dt = vect_external_def;
      else
	*dt = STMT_VINFO_DEF_TYPE (stmt_vinfo);
    }

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "type of def: ");
      switch (*dt)
	{
	case vect_uninitialized_def:
	  dump_printf (MSG_NOTE, "uninitialized\n");
	  break;
	case vect_constant_def:
	  dump_printf (MSG_NOTE, "constant\n");
	  break;
	case vect_external_def:
	  dump_printf (MSG_NOTE, "external\n");
	  break;
	case vect_internal_def:
	  dump_printf (MSG_NOTE, "internal\n");
	  break;
	case vect_induction_def:
	  dump_printf (MSG_NOTE, "induction\n");
	  break;
	case vect_reduction_def:
	  dump_printf (MSG_NOTE, "reduction\n");
	  break;
	case vect_double_reduction_def:
	  dump_printf (MSG_NOTE, "double reduction\n");
	  break;
	case vect_nested_cycle:
	  dump_printf (MSG_NOTE, "nested cycle\n");
	  break;
	case vect_unknown_def_type:
	  dump_printf (MSG_NOTE, "unknown\n");
	  break;
	}
    }

  if (*dt == vect_unknown_def_type)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "Unsupported pattern.\n");
      return false;
    }

  switch (gimple_code (*def_stmt))
    {
    case GIMPLE_PHI:
    case GIMPLE_ASSIGN:
    case GIMPLE_CALL:
      break;
    default:
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			 "unsupported defining stmt:\n");
      return false;
    }

  return true;
}
/* Function vect_is_simple_use.

   Same as vect_is_simple_use but also determines the vector operand
   type of OPERAND and stores it to *VECTYPE.  If the definition of
   OPERAND is vect_uninitialized_def, vect_constant_def or
   vect_external_def, *VECTYPE will be set to NULL_TREE and the caller
   is responsible for computing the best suited vector type for the
   scalar operand.  */

bool
vect_is_simple_use (tree operand, vec_info *vinfo,
		    gimple **def_stmt, enum vect_def_type *dt, tree *vectype)
{
  if (!vect_is_simple_use (operand, vinfo, def_stmt, dt))
    return false;

  /* Now get a vector type if the def is internal, otherwise supply
     NULL_TREE and leave it up to the caller to figure out a proper
     type for the use stmt.  */
  if (*dt == vect_internal_def
      || *dt == vect_induction_def
      || *dt == vect_reduction_def
      || *dt == vect_double_reduction_def
      || *dt == vect_nested_cycle)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (*def_stmt);

      if (STMT_VINFO_IN_PATTERN_P (stmt_info)
	  && !STMT_VINFO_RELEVANT (stmt_info)
	  && !STMT_VINFO_LIVE_P (stmt_info))
	stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));

      *vectype = STMT_VINFO_VECTYPE (stmt_info);
      gcc_assert (*vectype != NULL_TREE);
    }
  else if (*dt == vect_uninitialized_def
	   || *dt == vect_constant_def
	   || *dt == vect_external_def)
    *vectype = NULL_TREE;

  return true;
}
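
/* A minimal usage sketch (editorial; OP and VINFO are hypothetical
   names for a caller's operand and vec_info):

     gimple *def_stmt;
     enum vect_def_type dt;
     tree op_vectype;
     if (!vect_is_simple_use (op, vinfo, &def_stmt, &dt, &op_vectype))
       return false;
     if (op_vectype == NULL_TREE)
       op_vectype = get_vectype_for_scalar_type (TREE_TYPE (op));

   i.e. for constant and external defs the caller computes a suitable
   vectype itself, as the function comment above prescribes.  */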
/* Function supportable_widening_operation

   Check whether an operation represented by the code CODE is a
   widening operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Widening operations we currently support are NOP (CONVERT), FLOAT
   and WIDEN_MULT.  This function checks if these operations are supported
   by the target platform either directly (via vector tree-codes), or via
   target builtins.

   Output:
   - CODE1 and CODE2 are codes of vector operations to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like char->short->int - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   widening operation (short in the above example).  */

bool
supportable_widening_operation (enum tree_code code, gimple *stmt,
				tree vectype_out, tree vectype_in,
				enum tree_code *code1, enum tree_code *code2,
				int *multi_step_cvt,
				vec<tree> *interm_types)
{
  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
  loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
  struct loop *vect_loop = NULL;
  machine_mode vec_mode;
  enum insn_code icode1, icode2;
  optab optab1, optab2;
  tree vectype = vectype_in;
  tree wide_vectype = vectype_out;
  enum tree_code c1, c2;
  int i;
  tree prev_type, intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  optab optab3, optab4;

  *multi_step_cvt = 0;
  if (loop_info)
    vect_loop = LOOP_VINFO_LOOP (loop_info);

  switch (code)
    {
    case WIDEN_MULT_EXPR:
      /* The result of a vectorized widening operation usually requires
	 two vectors (because the widened results do not fit into one vector).
	 The generated vector results would normally be expected to be
	 generated in the same order as in the original scalar computation,
	 i.e. if 8 results are generated in each vector iteration, they are
	 to be organized as follows:
		vect1: [res1,res2,res3,res4],
		vect2: [res5,res6,res7,res8].

	 However, in the special case that the result of the widening
	 operation is used in a reduction computation only, the order doesn't
	 matter (because when vectorizing a reduction we change the order of
	 the computation).  Some targets can take advantage of this and
	 generate more efficient code.  For example, targets like AltiVec
	 that support widen_mult using a sequence of {mult_even,mult_odd}
	 generate the following vectors:
		vect1: [res1,res3,res5,res7],
		vect2: [res2,res4,res6,res8].

	 When vectorizing outer-loops, we execute the inner-loop sequentially
	 (each vectorized inner-loop iteration contributes to VF outer-loop
	 iterations in parallel).  We therefore don't allow changing the
	 order of the computation in the inner-loop during outer-loop
	 vectorization.  */
      /* TODO: Another case in which order doesn't *really* matter is when we
	 widen and then contract again, e.g. (short)((int)x * y >> 8).
	 Normally, pack_trunc performs an even/odd permute, whereas the
	 repack from an even/odd expansion would be an interleave, which
	 would be significantly simpler for e.g. AVX2.  */
      /* In any case, in order to avoid duplicating the code below, recurse
	 on VEC_WIDEN_MULT_EVEN_EXPR.  If it succeeds, all the return values
	 are properly set up for the caller.  If we fail, we'll continue with
	 a VEC_WIDEN_MULT_LO/HI_EXPR check.  */
      if (vect_loop
	  && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
	  && !nested_in_vect_loop_p (vect_loop, stmt)
	  && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
					     stmt, vectype_out, vectype_in,
					     code1, code2, multi_step_cvt,
					     interm_types))
	{
	  /* Elements in a vector with vect_used_by_reduction property cannot
	     be reordered if the use chain with this property does not have
	     the same operation.  One such example is s += a * b, where
	     elements in a and b cannot be reordered.  Here we check if the
	     vector defined by STMT is only directly used in the reduction
	     statement.  */
	  tree lhs = gimple_assign_lhs (stmt);
	  use_operand_p dummy;
	  gimple *use_stmt;
	  stmt_vec_info use_stmt_info = NULL;
	  if (single_imm_use (lhs, &dummy, &use_stmt)
	      && (use_stmt_info = vinfo_for_stmt (use_stmt))
	      && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
	    return true;
	}
      c1 = VEC_WIDEN_MULT_LO_EXPR;
      c2 = VEC_WIDEN_MULT_HI_EXPR;
      break;

    case VEC_WIDEN_MULT_EVEN_EXPR:
      /* Support the recursion induced just above.  */
      c1 = VEC_WIDEN_MULT_EVEN_EXPR;
      c2 = VEC_WIDEN_MULT_ODD_EXPR;
      break;

    case WIDEN_LSHIFT_EXPR:
      c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
      c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
      break;

    CASE_CONVERT:
      c1 = VEC_UNPACK_LO_EXPR;
      c2 = VEC_UNPACK_HI_EXPR;
      break;

    case FLOAT_EXPR:
      c1 = VEC_UNPACK_FLOAT_LO_EXPR;
      c2 = VEC_UNPACK_FLOAT_HI_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      /* ??? Not yet implemented due to missing VEC_UNPACK_FIX_TRUNC_HI_EXPR/
	 VEC_UNPACK_FIX_TRUNC_LO_EXPR tree codes and optabs used for
	 computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (BYTES_BIG_ENDIAN && c1 != VEC_WIDEN_MULT_EVEN_EXPR)
    std::swap (c1, c2);

  if (code == FIX_TRUNC_EXPR)
    {
      /* The signedness is determined from output operand.  */
      optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
      optab2 = optab_for_tree_code (c2, vectype_out, optab_default);
    }
  else
    {
      optab1 = optab_for_tree_code (c1, vectype, optab_default);
      optab2 = optab_for_tree_code (c2, vectype, optab_default);
    }

  if (!optab1 || !optab2)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing
      || (icode2 = optab_handler (optab2, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;
  *code2 = c2;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
      && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */

  prev_type = vectype;
  prev_mode = vec_mode;

  if (!CONVERT_EXPR_CODE_P (code))
    return false;

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the promotion sequence.  We try
     MAX_INTERM_CVT_STEPS to get to WIDE_VECTYPE, and fail if we do
     not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode,
					  TYPE_UNSIGNED (prev_type));
      optab3 = optab_for_tree_code (c1, intermediate_type, optab_default);
      optab4 = optab_for_tree_code (c2, intermediate_type, optab_default);

      if (!optab3 || !optab4
	  || (icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || (icode2 = optab_handler (optab2, prev_mode)) == CODE_FOR_nothing
	  || insn_data[icode2].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (optab3, intermediate_mode))
	      == CODE_FOR_nothing)
	  || ((icode2 = optab_handler (optab4, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
	  && insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
	return true;

      prev_type = intermediate_type;
      prev_mode = intermediate_mode;
    }

  interm_types->release ();
  return false;
}
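
/* Editorial worked example: widening signed char -> int, i.e.

     signed char b[N];
     int a[N];
     for (i = 0; i < N; i++)
       a[i] = (int) b[i];

   If the target has no direct char->int unpack but does provide
   char->short and short->int VEC_UNPACK_LO/HI handlers, the loop
   above records the short-based vector type in INTERM_TYPES and sets
   *MULTI_STEP_CVT to 1 - exactly the char->short->int case named in
   the function comment.  */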
/* Function supportable_narrowing_operation

   Check whether an operation represented by the code CODE is a
   narrowing operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   and producing a result of type VECTYPE_OUT).

   Narrowing operations we currently support are NOP (CONVERT) and
   FIX_TRUNC.  This function checks if these operations are supported by
   the target platform directly via vector tree-codes.

   Output:
   - CODE1 is the code of a vector operation to be used when
   vectorizing the operation, if available.
   - MULTI_STEP_CVT determines the number of required intermediate steps in
   case of multi-step conversion (like int->short->char - in that case
   MULTI_STEP_CVT will be 1).
   - INTERM_TYPES contains the intermediate type required to perform the
   narrowing operation (short in the above example).  */

bool
supportable_narrowing_operation (enum tree_code code,
				 tree vectype_out, tree vectype_in,
				 enum tree_code *code1, int *multi_step_cvt,
				 vec<tree> *interm_types)
{
  machine_mode vec_mode;
  enum insn_code icode1;
  optab optab1, interm_optab;
  tree vectype = vectype_in;
  tree narrow_vectype = vectype_out;
  enum tree_code c1;
  tree intermediate_type;
  machine_mode intermediate_mode, prev_mode;
  int i;
  bool uns;

  *multi_step_cvt = 0;
  switch (code)
    {
    CASE_CONVERT:
      c1 = VEC_PACK_TRUNC_EXPR;
      break;

    case FIX_TRUNC_EXPR:
      c1 = VEC_PACK_FIX_TRUNC_EXPR;
      break;

    case FLOAT_EXPR:
      /* ??? Not yet implemented due to missing VEC_PACK_FLOAT_EXPR
	 tree code and optabs used for computing the operation.  */
      return false;

    default:
      gcc_unreachable ();
    }

  if (code == FIX_TRUNC_EXPR)
    /* The signedness is determined from output operand.  */
    optab1 = optab_for_tree_code (c1, vectype_out, optab_default);
  else
    optab1 = optab_for_tree_code (c1, vectype, optab_default);

  if (!optab1)
    return false;

  vec_mode = TYPE_MODE (vectype);
  if ((icode1 = optab_handler (optab1, vec_mode)) == CODE_FOR_nothing)
    return false;

  *code1 = c1;

  if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
    return true;

  /* Check if it's a multi-step conversion that can be done using intermediate
     types.  */
  prev_mode = vec_mode;
  if (code == FIX_TRUNC_EXPR)
    uns = TYPE_UNSIGNED (vectype_out);
  else
    uns = TYPE_UNSIGNED (vectype);

  /* For multi-step FIX_TRUNC_EXPR prefer signed floating to integer
     conversion over unsigned, as unsigned FIX_TRUNC_EXPR is often more
     costly than signed.  */
  if (code == FIX_TRUNC_EXPR && uns)
    {
      enum insn_code icode2;

      intermediate_type
	= lang_hooks.types.type_for_mode (TYPE_MODE (vectype_out), 0);
      interm_optab
	= optab_for_tree_code (c1, intermediate_type, optab_default);
      if (interm_optab != unknown_optab
	  && (icode2 = optab_handler (optab1, vec_mode)) != CODE_FOR_nothing
	  && insn_data[icode1].operand[0].mode
	     == insn_data[icode2].operand[0].mode)
	{
	  uns = false;
	  optab1 = interm_optab;
	  icode1 = icode2;
	}
    }

  /* We assume here that there will not be more than MAX_INTERM_CVT_STEPS
     intermediate steps in the narrowing sequence.  We try
     MAX_INTERM_CVT_STEPS to get to NARROW_VECTYPE, and fail if we do not.  */
  interm_types->create (MAX_INTERM_CVT_STEPS);
  for (i = 0; i < MAX_INTERM_CVT_STEPS; i++)
    {
      intermediate_mode = insn_data[icode1].operand[0].mode;
      intermediate_type
	= lang_hooks.types.type_for_mode (intermediate_mode, uns);
      interm_optab
	= optab_for_tree_code (VEC_PACK_TRUNC_EXPR, intermediate_type,
			       optab_default);
      if (!interm_optab
	  || ((icode1 = optab_handler (optab1, prev_mode)) == CODE_FOR_nothing)
	  || insn_data[icode1].operand[0].mode != intermediate_mode
	  || ((icode1 = optab_handler (interm_optab, intermediate_mode))
	      == CODE_FOR_nothing))
	break;

      interm_types->quick_push (intermediate_type);
      (*multi_step_cvt)++;

      if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
	return true;

      prev_mode = intermediate_mode;
      optab1 = interm_optab;
    }

  interm_types->release ();
  return false;
}
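
/* Editorial worked example, mirroring the widening case: narrowing

     int a[N];
     signed char b[N];
     for (i = 0; i < N; i++)
       b[i] = (signed char) a[i];

   with only int->short and short->char VEC_PACK_TRUNC handlers ends
   up with the short-based vector type in INTERM_TYPES and
   *MULTI_STEP_CVT == 1; each extra pack step halves the element width
   until TYPE_MODE (narrow_vectype) is reached.  */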